-rw-r--r--.github/pull_request_template.md6
-rw-r--r--.gitignore67
-rwxr-xr-xBUILD/SETUP.sh11
-rw-r--r--CMakeLists.txt3
-rw-r--r--CODING_STANDARDS.md301
-rw-r--r--CONTRIBUTING.md1
-rw-r--r--Docs/optimizer_costs.txt1309
-rw-r--r--VERSION8
-rw-r--r--client/mariadb-conv.cc8
-rw-r--r--client/mysql.cc107
-rw-r--r--client/mysql_plugin.c11
-rw-r--r--client/mysql_upgrade.c31
-rw-r--r--client/mysqladmin.cc9
-rw-r--r--client/mysqlbinlog.cc341
-rw-r--r--client/mysqlcheck.c10
-rw-r--r--client/mysqldump.c13
-rw-r--r--client/mysqlimport.c9
-rw-r--r--client/mysqlshow.c9
-rw-r--r--client/mysqlslap.c9
-rw-r--r--client/mysqltest.cc9
-rw-r--r--cmake/configure.pl2
-rw-r--r--cmake/cpack_rpm.cmake39
-rw-r--r--cmake/install_macros.cmake59
-rw-r--r--cmake/mysql_add_executable.cmake2
-rw-r--r--cmake/plugin.cmake5
-rw-r--r--debian/additions/innotop/innotop6
-rw-r--r--debian/additions/mariadb.conf.d/50-mariadb-clients.cnf22
-rw-r--r--debian/additions/mariadb.conf.d/50-mariadb_safe.cnf (renamed from debian/additions/mariadb.conf.d/50-mysqld_safe.cnf)6
-rw-r--r--debian/additions/mariadb.conf.d/50-mysql-clients.cnf22
-rw-r--r--debian/additions/mariadb.conf.d/50-server.cnf16
-rw-r--r--debian/changelog2
-rw-r--r--debian/control128
-rw-r--r--debian/mariadb-client-compat.install38
-rw-r--r--debian/mariadb-client-compat.links9
-rw-r--r--debian/mariadb-client-core.install3
-rw-r--r--debian/mariadb-client.install30
-rw-r--r--debian/mariadb-client.links9
-rw-r--r--debian/mariadb-server-compat.install12
-rw-r--r--debian/mariadb-server-core.install6
-rw-r--r--debian/mariadb-server.README.Debian53
-rw-r--r--debian/mariadb-server.install15
-rw-r--r--debian/source/lintian-overrides16
-rw-r--r--extra/CMakeLists.txt2
-rw-r--r--extra/innochecksum.cc16
-rw-r--r--extra/mariabackup/CMakeLists.txt4
-rw-r--r--extra/mariabackup/fil_cur.cc6
-rw-r--r--extra/mariabackup/xbstream.cc11
-rw-r--r--extra/mariabackup/xtrabackup.cc45
-rw-r--r--extra/my_print_defaults.c13
-rw-r--r--extra/mysql_waitpid.c9
-rw-r--r--extra/perror.c9
-rw-r--r--extra/replace.c6
-rw-r--r--extra/resolve_stack_dump.c10
-rw-r--r--extra/resolveip.c10
-rw-r--r--include/my_compare.h1
-rw-r--r--include/my_getopt.h1
-rw-r--r--include/my_global.h1
-rw-r--r--include/my_sys.h14
-rw-r--r--include/my_tracker.h41
-rw-r--r--include/myisam.h2
-rw-r--r--include/mysql_com.h21
-rw-r--r--include/mysys_err.h3
-rw-r--r--include/welcome_copyright_notice.h8
-rw-r--r--man/CMakeLists.txt106
-rw-r--r--man/mariadb-access.1 (renamed from man/mysqlaccess.1)0
-rw-r--r--man/mariadb-admin.1 (renamed from man/mysqladmin.1)0
-rw-r--r--man/mariadb-backup.1 (renamed from man/mariabackup.1)0
-rw-r--r--man/mariadb-binlog.1 (renamed from man/mysqlbinlog.1)0
-rw-r--r--man/mariadb-check.1 (renamed from man/mysqlcheck.1)0
-rw-r--r--man/mariadb-client-test-embedded.11
-rw-r--r--man/mariadb-client-test.1 (renamed from man/mysql_client_test.1)0
-rw-r--r--man/mariadb-convert-table-format.1 (renamed from man/mysql_convert_table_format.1)0
-rw-r--r--man/mariadb-dump.1 (renamed from man/mysqldump.1)0
-rw-r--r--man/mariadb-dumpslow.1 (renamed from man/mysqldumpslow.1)0
-rw-r--r--man/mariadb-embedded.11
-rw-r--r--man/mariadb-find-rows.1 (renamed from man/mysql_find_rows.1)0
-rw-r--r--man/mariadb-fix-extensions.1 (renamed from man/mysql_fix_extensions.1)2
-rw-r--r--man/mariadb-hotcopy.1 (renamed from man/mysqlhotcopy.1)0
-rw-r--r--man/mariadb-import.1 (renamed from man/mysqlimport.1)0
-rw-r--r--man/mariadb-install-db.1 (renamed from man/mysql_install_db.1)0
-rw-r--r--man/mariadb-plugin.1 (renamed from man/mysql_plugin.1)0
-rw-r--r--man/mariadb-secure-installation.1 (renamed from man/mysql_secure_installation.1)0
-rw-r--r--man/mariadb-setpermission.1 (renamed from man/mysql_setpermission.1)0
-rw-r--r--man/mariadb-show.1 (renamed from man/mysqlshow.1)0
-rw-r--r--man/mariadb-slap.1 (renamed from man/mysqlslap.1)0
-rw-r--r--man/mariadb-test-embedded.11
-rw-r--r--man/mariadb-test.1 (renamed from man/mysqltest.1)0
-rw-r--r--man/mariadb-tzinfo-to-sql.1 (renamed from man/mysql_tzinfo_to_sql.1)0
-rw-r--r--man/mariadb-upgrade.1 (renamed from man/mysql_upgrade.1)0
-rw-r--r--man/mariadb-waitpid.1 (renamed from man/mysql_waitpid.1)0
-rw-r--r--man/mariadb.1 (renamed from man/mysql.1)25
-rw-r--r--man/mariadb_config.1 (renamed from man/mysql_config.1)0
-rw-r--r--man/mariadbd-multi.1 (renamed from man/mysqld_multi.1)0
-rw-r--r--man/mariadbd-safe-helper.1 (renamed from man/mysqld_safe_helper.1)0
-rw-r--r--man/mariadbd-safe.1 (renamed from man/mysqld_safe.1)0
-rw-r--r--man/mariadbd.8 (renamed from man/mysqld.8)0
-rw-r--r--man/mysql_client_test_embedded.11
-rw-r--r--man/mysql_embedded.11
-rw-r--r--man/mysqltest_embedded.11
-rw-r--r--mysql-test/include/analyze-format.inc2
-rw-r--r--mysql-test/include/analyze-no-filtered.inc2
-rw-r--r--mysql-test/include/check-testcase.test2
-rw-r--r--mysql-test/include/common-tests.inc20
-rw-r--r--mysql-test/include/ctype_numconv.inc1
-rw-r--r--mysql-test/include/explain-no-costs-filtered.inc1
-rw-r--r--mysql-test/include/explain-no-costs.inc1
-rw-r--r--mysql-test/include/explain_non_select.inc16
-rw-r--r--mysql-test/include/galera_variables_ok.inc2
-rw-r--r--mysql-test/include/icp_tests.inc6
-rw-r--r--mysql-test/include/index_merge1.inc2
-rw-r--r--mysql-test/include/last_query_cost.inc5
-rw-r--r--mysql-test/include/load_dump_and_upgrade.inc2
-rw-r--r--mysql-test/include/mix1.inc8
-rw-r--r--mysql-test/include/percona_nonflushing_analyze_debug.inc2
-rw-r--r--mysql-test/include/rowid_filter_debug_kill.inc22
-rw-r--r--mysql-test/include/world.inc2
-rw-r--r--mysql-test/lib/My/Debugger.pm2
-rw-r--r--mysql-test/main/alter_table_combinations,aria.rdiff4
-rw-r--r--mysql-test/main/alter_table_combinations,heap.rdiff4
-rw-r--r--mysql-test/main/analyze_format_json.result97
-rw-r--r--mysql-test/main/analyze_format_json.test2
-rw-r--r--mysql-test/main/analyze_stmt.result4
-rw-r--r--mysql-test/main/analyze_stmt_orderby.result47
-rw-r--r--mysql-test/main/analyze_stmt_orderby.test6
-rw-r--r--mysql-test/main/analyze_stmt_privileges2.result86
-rw-r--r--mysql-test/main/brackets.result24
-rw-r--r--mysql-test/main/brackets.test5
-rw-r--r--mysql-test/main/bug12427262.result6
-rw-r--r--mysql-test/main/bug46760-master.opt1
-rw-r--r--mysql-test/main/column_compression_parts.test1
-rw-r--r--mysql-test/main/comments.result10
-rw-r--r--mysql-test/main/comments.test6
-rw-r--r--mysql-test/main/compress.result19
-rw-r--r--mysql-test/main/costs.result126
-rw-r--r--mysql-test/main/costs.test116
-rw-r--r--mysql-test/main/crash_commit_before-master.opt1
-rw-r--r--mysql-test/main/cte_nonrecursive.result28
-rw-r--r--mysql-test/main/cte_recursive.result149
-rw-r--r--mysql-test/main/cte_recursive.test3
-rw-r--r--mysql-test/main/ctype_binary.result1
-rw-r--r--mysql-test/main/ctype_collate.result2
-rw-r--r--mysql-test/main/ctype_cp1251.result1
-rw-r--r--mysql-test/main/ctype_gbk.result4
-rw-r--r--mysql-test/main/ctype_latin1.result1
-rw-r--r--mysql-test/main/ctype_ucs.result5
-rw-r--r--mysql-test/main/ctype_upgrade.test2
-rw-r--r--mysql-test/main/ctype_utf8.result1
-rw-r--r--mysql-test/main/custom_aggregates_i_s.result2
-rw-r--r--mysql-test/main/delete.result9
-rw-r--r--mysql-test/main/delete.test7
-rw-r--r--mysql-test/main/delete_innodb.result2
-rw-r--r--mysql-test/main/derived.result153
-rw-r--r--mysql-test/main/derived.test101
-rw-r--r--mysql-test/main/derived_cond_pushdown.result2623
-rw-r--r--mysql-test/main/derived_cond_pushdown.test206
-rw-r--r--mysql-test/main/derived_opt.result9
-rw-r--r--mysql-test/main/derived_split_innodb.result40
-rw-r--r--mysql-test/main/derived_split_innodb.test2
-rw-r--r--mysql-test/main/derived_view.result135
-rw-r--r--mysql-test/main/derived_view.test8
-rw-r--r--mysql-test/main/desc_index_range.result2
-rw-r--r--mysql-test/main/distinct.result24
-rw-r--r--mysql-test/main/distinct.test6
-rw-r--r--mysql-test/main/events_bugs.result4
-rw-r--r--mysql-test/main/except.result66
-rw-r--r--mysql-test/main/except.test2
-rw-r--r--mysql-test/main/except_all.result72
-rw-r--r--mysql-test/main/except_all.test4
-rw-r--r--mysql-test/main/explain.result17
-rw-r--r--mysql-test/main/explain.test3
-rw-r--r--mysql-test/main/explain_innodb.result2
-rw-r--r--mysql-test/main/explain_json.result317
-rw-r--r--mysql-test/main/explain_json.test64
-rw-r--r--mysql-test/main/explain_json_format_partitions.result6
-rw-r--r--mysql-test/main/explain_json_format_partitions.test1
-rw-r--r--mysql-test/main/explain_json_innodb.result11
-rw-r--r--mysql-test/main/explain_json_innodb.test1
-rw-r--r--mysql-test/main/explain_non_select.result2
-rw-r--r--mysql-test/main/features.result22
-rw-r--r--mysql-test/main/features.test16
-rw-r--r--mysql-test/main/fetch_first.result76
-rw-r--r--mysql-test/main/fetch_first.test19
-rw-r--r--mysql-test/main/filesort_debug.result1
-rw-r--r--mysql-test/main/filesort_debug.test2
-rw-r--r--mysql-test/main/flush-innodb.result5
-rw-r--r--mysql-test/main/flush-innodb.test7
-rw-r--r--mysql-test/main/fulltext.result7
-rw-r--r--mysql-test/main/fulltext.test2
-rw-r--r--mysql-test/main/func_group.result5
-rw-r--r--mysql-test/main/func_group_innodb.result6
-rw-r--r--mysql-test/main/func_group_innodb.test2
-rw-r--r--mysql-test/main/func_str.result6
-rw-r--r--mysql-test/main/func_time.result8
-rw-r--r--mysql-test/main/grant.result4
-rw-r--r--mysql-test/main/grant2.result4
-rw-r--r--mysql-test/main/grant_binlog_replay.result21
-rw-r--r--mysql-test/main/grant_binlog_replay.test27
-rw-r--r--mysql-test/main/grant_kill.result24
-rw-r--r--mysql-test/main/grant_kill.test29
-rw-r--r--mysql-test/main/grant_server.result18
-rw-r--r--mysql-test/main/grant_server.test15
-rw-r--r--mysql-test/main/grant_slave_admin.result29
-rw-r--r--mysql-test/main/grant_slave_admin.test25
-rw-r--r--mysql-test/main/grant_slave_monitor.result28
-rw-r--r--mysql-test/main/grant_slave_monitor.test33
-rw-r--r--mysql-test/main/greedy_optimizer.result216
-rw-r--r--mysql-test/main/greedy_optimizer.test28
-rw-r--r--mysql-test/main/group_by.result68
-rw-r--r--mysql-test/main/group_by.test31
-rw-r--r--mysql-test/main/group_min_max.result70
-rw-r--r--mysql-test/main/group_min_max.test23
-rw-r--r--mysql-test/main/group_min_max_innodb.result22
-rw-r--r--mysql-test/main/having_cond_pushdown.result466
-rw-r--r--mysql-test/main/having_cond_pushdown.test146
-rw-r--r--mysql-test/main/ignored_index.result14
-rw-r--r--mysql-test/main/ignored_index.test8
-rw-r--r--mysql-test/main/in_subq_cond_pushdown.result344
-rw-r--r--mysql-test/main/in_subq_cond_pushdown.test38
-rw-r--r--mysql-test/main/index_intersect.result25
-rw-r--r--mysql-test/main/index_intersect.test1
-rw-r--r--mysql-test/main/index_intersect_innodb.result31
-rw-r--r--mysql-test/main/index_merge_innodb.result8
-rw-r--r--mysql-test/main/index_merge_myisam.result20
-rw-r--r--mysql-test/main/information_schema-big.result2
-rw-r--r--mysql-test/main/information_schema.result3
-rw-r--r--mysql-test/main/information_schema_all_engines.result8
-rw-r--r--mysql-test/main/information_schema_db.result4
-rw-r--r--mysql-test/main/innodb_ext_key,off.rdiff26
-rw-r--r--mysql-test/main/innodb_ext_key.result35
-rw-r--r--mysql-test/main/innodb_ext_key.test22
-rw-r--r--mysql-test/main/innodb_icp.result20
-rw-r--r--mysql-test/main/innodb_mysql_lock2.result14
-rw-r--r--mysql-test/main/innodb_mysql_lock2.test1
-rw-r--r--mysql-test/main/intersect.result63
-rw-r--r--mysql-test/main/intersect.test2
-rw-r--r--mysql-test/main/intersect_all.result63
-rw-r--r--mysql-test/main/intersect_all.test4
-rw-r--r--mysql-test/main/invisible_field.result4
-rw-r--r--mysql-test/main/invisible_field_debug.result6
-rw-r--r--mysql-test/main/join.result63
-rw-r--r--mysql-test/main/join.test33
-rw-r--r--mysql-test/main/join_cache.result139
-rw-r--r--mysql-test/main/join_cache.test66
-rw-r--r--mysql-test/main/join_nested.result50
-rw-r--r--mysql-test/main/join_nested.test1
-rw-r--r--mysql-test/main/join_nested_jcl6.result66
-rw-r--r--mysql-test/main/join_outer.result14
-rw-r--r--mysql-test/main/join_outer.test2
-rw-r--r--mysql-test/main/join_outer_innodb.result40
-rw-r--r--mysql-test/main/join_outer_jcl6.result14
-rw-r--r--mysql-test/main/key.result52
-rw-r--r--mysql-test/main/key.test23
-rw-r--r--mysql-test/main/key_cache.result42
-rw-r--r--mysql-test/main/key_cache.test12
-rw-r--r--mysql-test/main/key_diff.result2
-rw-r--r--mysql-test/main/limit_rows_examined.result50
-rw-r--r--mysql-test/main/limit_rows_examined.test6
-rw-r--r--mysql-test/main/lock_sync-master.opt1
-rw-r--r--mysql-test/main/lock_sync.result16
-rw-r--r--mysql-test/main/locking_clause.result8
-rw-r--r--mysql-test/main/log_slow_debug.result16
-rw-r--r--mysql-test/main/log_tables_upgrade.test2
-rw-r--r--mysql-test/main/long_unique.result18
-rw-r--r--mysql-test/main/long_unique.test1
-rw-r--r--mysql-test/main/lowercase_fs_off.test2
-rw-r--r--mysql-test/main/mdev-25830.result10
-rw-r--r--mysql-test/main/merge.result46
-rw-r--r--mysql-test/main/merge.test39
-rw-r--r--mysql-test/main/metadata.result8
-rw-r--r--mysql-test/main/mrr_derived_crash_4610.result2
-rw-r--r--mysql-test/main/multi_update.result9
-rw-r--r--mysql-test/main/multi_update.test3
-rw-r--r--mysql-test/main/myisam.result57
-rw-r--r--mysql-test/main/myisam.test41
-rw-r--r--mysql-test/main/myisam_debug.result2
-rw-r--r--mysql-test/main/myisam_explain_non_select_all.result416
-rw-r--r--mysql-test/main/myisam_icp.result28
-rw-r--r--mysql-test/main/myisam_icp.test2
-rw-r--r--mysql-test/main/myisam_mrr,32bit.rdiff4
-rw-r--r--mysql-test/main/myisam_mrr.result5
-rw-r--r--mysql-test/main/myisam_mrr.test5
-rw-r--r--mysql-test/main/mysql.result3
-rw-r--r--mysql-test/main/mysql.test11
-rw-r--r--mysql-test/main/mysql_client_test.result2
-rw-r--r--mysql-test/main/mysql_json_mysql_upgrade.test2
-rw-r--r--mysql-test/main/mysql_json_mysql_upgrade_with_plugin_loaded.test2
-rw-r--r--mysql-test/main/mysql_upgrade-20228.test4
-rw-r--r--mysql-test/main/mysql_upgrade-6984.test4
-rw-r--r--mysql-test/main/mysql_upgrade.result20
-rw-r--r--mysql-test/main/mysql_upgrade.test79
-rw-r--r--mysql-test/main/mysql_upgrade_mysql_json_datatype.test2
-rw-r--r--mysql-test/main/mysql_upgrade_no_innodb.test2
-rw-r--r--mysql-test/main/mysql_upgrade_noengine.result4
-rw-r--r--mysql-test/main/mysql_upgrade_noengine.test10
-rw-r--r--mysql-test/main/mysql_upgrade_ssl.test2
-rw-r--r--mysql-test/main/mysql_upgrade_to_100502.result10
-rw-r--r--mysql-test/main/mysql_upgrade_to_100502.test2
-rw-r--r--mysql-test/main/mysql_upgrade_view.test2
-rw-r--r--mysql-test/main/mysqlbinlog.result37
-rw-r--r--mysql-test/main/mysqlbinlog.test8
-rw-r--r--mysql-test/main/mysqld--help.result61
-rw-r--r--mysql-test/main/mysqld--help.test1
-rw-r--r--mysql-test/main/mysqldump.result6
-rw-r--r--mysql-test/main/named_pipe.result19
-rw-r--r--mysql-test/main/null_key.result8
-rw-r--r--mysql-test/main/null_key.test7
-rw-r--r--mysql-test/main/opt_trace.result6066
-rw-r--r--mysql-test/main/opt_trace.test164
-rw-r--r--mysql-test/main/opt_trace_index_merge.result169
-rw-r--r--mysql-test/main/opt_trace_index_merge_innodb.result53
-rw-r--r--mysql-test/main/opt_trace_security.result66
-rw-r--r--mysql-test/main/opt_trace_selectivity.result369
-rw-r--r--mysql-test/main/opt_trace_selectivity.test86
-rw-r--r--mysql-test/main/opt_trace_ucs2.result5
-rw-r--r--mysql-test/main/opt_trace_ucs2.test1
-rw-r--r--mysql-test/main/opt_tvc.result193
-rw-r--r--mysql-test/main/opt_tvc.test19
-rw-r--r--mysql-test/main/optimizer_costs.result347
-rw-r--r--mysql-test/main/optimizer_costs.test189
-rw-r--r--mysql-test/main/optimizer_costs2.opt1
-rw-r--r--mysql-test/main/optimizer_costs2.result8
-rw-r--r--mysql-test/main/optimizer_costs2.test6
-rw-r--r--mysql-test/main/order_by.result292
-rw-r--r--mysql-test/main/order_by.test18
-rw-r--r--mysql-test/main/order_by_innodb.result26
-rw-r--r--mysql-test/main/order_by_innodb.test8
-rw-r--r--mysql-test/main/order_by_pack_big.result12
-rw-r--r--mysql-test/main/order_by_sortkey.result35
-rw-r--r--mysql-test/main/order_by_sortkey.test21
-rw-r--r--mysql-test/main/outfile_loaddata.result5
-rw-r--r--mysql-test/main/parser.result5
-rw-r--r--mysql-test/main/partition.result4
-rw-r--r--mysql-test/main/partition_exchange.result2
-rw-r--r--mysql-test/main/partition_exchange.test2
-rw-r--r--mysql-test/main/partition_explicit_prune.result4
-rw-r--r--mysql-test/main/partition_innodb2.result24
-rw-r--r--mysql-test/main/partition_innodb2.test30
-rw-r--r--mysql-test/main/partition_innodb_plugin.result6
-rw-r--r--mysql-test/main/partition_mrr_aria.result4
-rw-r--r--mysql-test/main/partition_mrr_innodb.result4
-rw-r--r--mysql-test/main/partition_mrr_myisam.result6
-rw-r--r--mysql-test/main/partition_pruning.result32
-rw-r--r--mysql-test/main/partition_pruning.test6
-rw-r--r--mysql-test/main/partition_range.result76
-rw-r--r--mysql-test/main/partition_range.test35
-rw-r--r--mysql-test/main/percona_nonflushing_analyze_debug.result2
-rw-r--r--mysql-test/main/plugin_auth.test2
-rw-r--r--mysql-test/main/pool_of_threads.result19
-rw-r--r--mysql-test/main/ps.result8
-rw-r--r--mysql-test/main/ps_1general.result2
-rw-r--r--mysql-test/main/ps_1general.test4
-rw-r--r--mysql-test/main/ps_ddl.result6
-rw-r--r--mysql-test/main/ps_ddl1.result2
-rw-r--r--mysql-test/main/query_cache.result6
-rw-r--r--mysql-test/main/range.result185
-rw-r--r--mysql-test/main/range.test40
-rw-r--r--mysql-test/main/range_aria_dbt3.result10
-rw-r--r--mysql-test/main/range_aria_dbt3.test6
-rw-r--r--mysql-test/main/range_innodb.result7
-rw-r--r--mysql-test/main/range_innodb.test4
-rw-r--r--mysql-test/main/range_interrupted-13751.result14
-rw-r--r--mysql-test/main/range_interrupted-13751.test14
-rw-r--r--mysql-test/main/range_mrr_icp.result185
-rw-r--r--mysql-test/main/range_notembedded.result3
-rw-r--r--mysql-test/main/range_notembedded.test1
-rw-r--r--mysql-test/main/range_vs_index_merge.result61
-rw-r--r--mysql-test/main/range_vs_index_merge.test16
-rw-r--r--mysql-test/main/range_vs_index_merge_innodb.result59
-rw-r--r--mysql-test/main/rowid_filter.result1298
-rw-r--r--mysql-test/main/rowid_filter.test401
-rw-r--r--mysql-test/main/rowid_filter_aria.result2245
-rw-r--r--mysql-test/main/rowid_filter_aria.test9
-rw-r--r--mysql-test/main/rowid_filter_innodb.result1210
-rw-r--r--mysql-test/main/rowid_filter_innodb.test9
-rw-r--r--mysql-test/main/rowid_filter_innodb_debug.result19
-rw-r--r--mysql-test/main/rowid_filter_innodb_debug.test2
-rw-r--r--mysql-test/main/rowid_filter_myisam.result662
-rw-r--r--mysql-test/main/rowid_filter_myisam.test405
-rw-r--r--mysql-test/main/rowid_filter_myisam_debug.result19
-rw-r--r--mysql-test/main/rpl_mysql_upgrade_slave_repo_check.test4
-rw-r--r--mysql-test/main/select.result82
-rw-r--r--mysql-test/main/select.test44
-rw-r--r--mysql-test/main/select_jcl6.result90
-rw-r--r--mysql-test/main/select_pkeycache.result82
-rw-r--r--mysql-test/main/select_safe.result1
-rw-r--r--mysql-test/main/select_safe.test1
-rw-r--r--mysql-test/main/selectivity.result164
-rw-r--r--mysql-test/main/selectivity.test107
-rw-r--r--mysql-test/main/selectivity_innodb.result249
-rw-r--r--mysql-test/main/selectivity_innodb.test58
-rw-r--r--mysql-test/main/selectivity_no_engine.result19
-rw-r--r--mysql-test/main/selectivity_no_engine.test21
-rw-r--r--mysql-test/main/set_operation.result48
-rw-r--r--mysql-test/main/set_operation.test1
-rw-r--r--mysql-test/main/show_analyze.result3
-rw-r--r--mysql-test/main/show_analyze_json.result102
-rw-r--r--mysql-test/main/show_explain.result4
-rw-r--r--mysql-test/main/show_explain_json.result117
-rw-r--r--mysql-test/main/show_explain_json.test28
-rw-r--r--mysql-test/main/signal_demo1.result7
-rw-r--r--mysql-test/main/single_delete_update.result50
-rw-r--r--mysql-test/main/single_delete_update.test2
-rw-r--r--mysql-test/main/skr.result54
-rw-r--r--mysql-test/main/skr.test56
-rw-r--r--mysql-test/main/sp-anchor-row-type-cursor.result6
-rw-r--r--mysql-test/main/sp-anchor-row-type-table.result6
-rw-r--r--mysql-test/main/sp-anchor-type.result2
-rw-r--r--mysql-test/main/sp-big.result2
-rw-r--r--mysql-test/main/sp-error.result6
-rw-r--r--mysql-test/main/sp-row.result6
-rw-r--r--mysql-test/main/sp-security.result4
-rw-r--r--mysql-test/main/sp.result7
-rw-r--r--mysql-test/main/sp_trans.result2
-rw-r--r--mysql-test/main/sp_trans_log.result2
-rw-r--r--mysql-test/main/sql_safe_updates.result13
-rw-r--r--mysql-test/main/sql_safe_updates.test7
-rw-r--r--mysql-test/main/ssl.result19
-rw-r--r--mysql-test/main/ssl_compress.result19
-rw-r--r--mysql-test/main/ssl_timeout.test2
-rw-r--r--mysql-test/main/stat_tables.result30
-rw-r--r--mysql-test/main/stat_tables.test3
-rw-r--r--mysql-test/main/stat_tables_innodb.result50
-rw-r--r--mysql-test/main/statistics_json.result3
-rw-r--r--mysql-test/main/statistics_json.test1
-rw-r--r--mysql-test/main/statistics_upgrade.test4
-rw-r--r--mysql-test/main/statistics_upgrade_not_done.test4
-rw-r--r--mysql-test/main/status.result18
-rw-r--r--mysql-test/main/subselect.result161
-rw-r--r--mysql-test/main/subselect.test73
-rw-r--r--mysql-test/main/subselect2.result20
-rw-r--r--mysql-test/main/subselect2.test4
-rw-r--r--mysql-test/main/subselect3.inc41
-rw-r--r--mysql-test/main/subselect3.result53
-rw-r--r--mysql-test/main/subselect3_jcl6.result69
-rw-r--r--mysql-test/main/subselect4.result87
-rw-r--r--mysql-test/main/subselect4.test8
-rw-r--r--mysql-test/main/subselect_cache.result30
-rw-r--r--mysql-test/main/subselect_cache.test2
-rw-r--r--mysql-test/main/subselect_exists2in.result47
-rw-r--r--mysql-test/main/subselect_exists2in.test16
-rw-r--r--mysql-test/main/subselect_exists2in_costmat.result2
-rw-r--r--mysql-test/main/subselect_exists2in_costmat.test1
-rw-r--r--mysql-test/main/subselect_extra.result14
-rw-r--r--mysql-test/main/subselect_extra_no_semijoin.result6
-rw-r--r--mysql-test/main/subselect_firstmatch.result30
-rw-r--r--mysql-test/main/subselect_firstmatch.test28
-rw-r--r--mysql-test/main/subselect_innodb.result27
-rw-r--r--mysql-test/main/subselect_innodb.test14
-rw-r--r--mysql-test/main/subselect_mat.result83
-rw-r--r--mysql-test/main/subselect_mat.test3
-rw-r--r--mysql-test/main/subselect_mat_cost.opt (renamed from mysql-test/main/subselect_mat_cost-master.opt)0
-rw-r--r--mysql-test/main/subselect_mat_cost.result77
-rw-r--r--mysql-test/main/subselect_mat_cost.test37
-rw-r--r--mysql-test/main/subselect_mat_cost_bugs.result18
-rw-r--r--mysql-test/main/subselect_no_exists_to_in.result153
-rw-r--r--mysql-test/main/subselect_no_mat.result151
-rw-r--r--mysql-test/main/subselect_no_opts.result123
-rw-r--r--mysql-test/main/subselect_no_scache.result161
-rw-r--r--mysql-test/main/subselect_no_semijoin.result185
-rw-r--r--mysql-test/main/subselect_no_semijoin.test1
-rw-r--r--mysql-test/main/subselect_partial_match.result22
-rw-r--r--mysql-test/main/subselect_partial_match.test14
-rw-r--r--mysql-test/main/subselect_sj.result222
-rw-r--r--mysql-test/main/subselect_sj.test26
-rw-r--r--mysql-test/main/subselect_sj2.result82
-rw-r--r--mysql-test/main/subselect_sj2.test33
-rw-r--r--mysql-test/main/subselect_sj2_jcl6.result132
-rw-r--r--mysql-test/main/subselect_sj2_jcl6.test5
-rw-r--r--mysql-test/main/subselect_sj2_mat.result143
-rw-r--r--mysql-test/main/subselect_sj2_mat.test3
-rw-r--r--mysql-test/main/subselect_sj_jcl6.result268
-rw-r--r--mysql-test/main/subselect_sj_jcl6.test19
-rw-r--r--mysql-test/main/subselect_sj_mat.result229
-rw-r--r--mysql-test/main/subselect_sj_mat.test23
-rw-r--r--mysql-test/main/subselect_sj_nonmerged.result8
-rw-r--r--mysql-test/main/system_mysql_db_error_log.result2
-rw-r--r--mysql-test/main/table_elim.result34
-rw-r--r--mysql-test/main/table_elim.test4
-rw-r--r--mysql-test/main/table_value_constr.result76
-rw-r--r--mysql-test/main/table_value_constr.test13
-rw-r--r--mysql-test/main/tmp_table_count-7586.result2
-rw-r--r--mysql-test/main/tmp_table_count-7586.test2
-rw-r--r--mysql-test/main/trigger.result4
-rw-r--r--mysql-test/main/trigger_notembedded.result2
-rw-r--r--mysql-test/main/type_blob.result8
-rw-r--r--mysql-test/main/type_datetime.result4
-rw-r--r--mysql-test/main/type_enum.result2
-rw-r--r--mysql-test/main/type_ranges.result2
-rw-r--r--mysql-test/main/type_set.result2
-rw-r--r--mysql-test/main/type_time_6065.result75
-rw-r--r--mysql-test/main/type_timestamp.result2
-rw-r--r--mysql-test/main/union.result75
-rw-r--r--mysql-test/main/union.test20
-rw-r--r--mysql-test/main/update_use_source.result16
-rw-r--r--mysql-test/main/upgrade_MDEV-19650.test4
-rw-r--r--mysql-test/main/upgrade_MDEV-23102-1.test8
-rw-r--r--mysql-test/main/upgrade_MDEV-23102-2.test8
-rw-r--r--mysql-test/main/upgrade_geometrycolumn_procedure_definer.test4
-rw-r--r--mysql-test/main/upgrade_mdev_24363.test4
-rw-r--r--mysql-test/main/userstat.result2
-rw-r--r--mysql-test/main/view.result60
-rw-r--r--mysql-test/main/view.test11
-rw-r--r--mysql-test/main/view_grant.result12
-rw-r--r--mysql-test/main/win.result39
-rw-r--r--mysql-test/main/win.test12
-rw-r--r--mysql-test/main/win_empty_over.result6
-rw-r--r--mysql-test/main/win_empty_over.test2
-rw-r--r--mysql-test/main/xtradb_mrr.result6
-rw-r--r--mysql-test/main/xtradb_mrr.test3
-rwxr-xr-xmysql-test/mariadb-test-run.pl1
-rw-r--r--mysql-test/std_data/bug47142_master-bin.000001 bin 386 -> 0 bytes
-rw-r--r--mysql-test/std_data/master-bin.000001 bin 98 -> 0 bytes
-rw-r--r--mysql-test/std_data/trunc_binlog.000001 bin 174 -> 0 bytes
-rw-r--r--mysql-test/suite/archive/archive.result4
-rw-r--r--mysql-test/suite/archive/archive.test3
-rw-r--r--mysql-test/suite/binlog/r/binlog_base64_flag.result33
-rw-r--r--mysql-test/suite/binlog/r/binlog_grant.result38
-rw-r--r--mysql-test/suite/binlog/r/binlog_old_versions.result70
-rw-r--r--mysql-test/suite/binlog/r/binlog_unsafe.result4
-rw-r--r--mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001 bin 149436 -> 0 bytes
-rw-r--r--mysql-test/suite/binlog/std_data/bug32407.001 bin 368 -> 0 bytes
-rw-r--r--mysql-test/suite/binlog/std_data/ver_5_1-telco.001 bin 150385 -> 0 bytes
-rw-r--r--mysql-test/suite/binlog/std_data/ver_5_1_17.001 bin 150385 -> 0 bytes
-rw-r--r--mysql-test/suite/binlog/std_data/ver_5_1_23.001 bin 150402 -> 0 bytes
-rw-r--r--mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001 bin 151722 -> 0 bytes
-rw-r--r--mysql-test/suite/binlog/t/binlog_base64_flag.test28
-rw-r--r--mysql-test/suite/binlog/t/binlog_expire_warnings.opt1
-rw-r--r--mysql-test/suite/binlog/t/binlog_grant.test33
-rw-r--r--mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test2
-rw-r--r--mysql-test/suite/binlog/t/binlog_old_versions.test153
-rw-r--r--mysql-test/suite/binlog/t/binlog_truncate_multi_engine.test1
-rw-r--r--mysql-test/suite/binlog_encryption/rpl_skip_replication.result2
-rw-r--r--mysql-test/suite/binlog_encryption/rpl_sync-master.opt1
-rw-r--r--mysql-test/suite/binlog_encryption/rpl_sync-slave.opt2
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-package-innodb.result2
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-package.result9
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-row.result18
-rw-r--r--mysql-test/suite/compat/oracle/r/table_value_constr.result66
-rw-r--r--mysql-test/suite/compat/oracle/r/update_innodb.result4
-rw-r--r--mysql-test/suite/compat/oracle/t/table_value_constr.test1
-rw-r--r--mysql-test/suite/encryption/r/encrypt_and_grep.result1
-rw-r--r--mysql-test/suite/encryption/r/innochecksum.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change2.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change4.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-checksum-algorithm.result3
-rw-r--r--mysql-test/suite/encryption/r/innodb-compressed-blob.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-force-corrupt.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-key-rotation-disable.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-redo-badkey.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-redo-nokeys.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-remove-encryption.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb-spatial-index.result3
-rw-r--r--mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result1
-rw-r--r--mysql-test/suite/encryption/r/tempfiles_encrypted.result39
-rw-r--r--mysql-test/suite/encryption/t/encrypt_and_grep.test2
-rw-r--r--mysql-test/suite/encryption/t/innochecksum.test1
-rw-r--r--mysql-test/suite/encryption/t/innodb-bad-key-change.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb-bad-key-change2.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb-bad-key-change4.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb-checksum-algorithm.test3
-rw-r--r--mysql-test/suite/encryption/t/innodb-compressed-blob.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb-force-corrupt.test1
-rw-r--r--mysql-test/suite/encryption/t/innodb-key-rotation-disable.test1
-rw-r--r--mysql-test/suite/encryption/t/innodb-redo-badkey.opt1
-rw-r--r--mysql-test/suite/encryption/t/innodb-redo-badkey.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb-redo-nokeys.opt1
-rw-r--r--mysql-test/suite/encryption/t/innodb-redo-nokeys.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb_onlinealter_encryption.test2
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_drop_db.result2
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_misc_functions.result2
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff4
-rw-r--r--mysql-test/suite/engines/funcs/r/rpl_temporary.result4
-rw-r--r--mysql-test/suite/engines/iuds/r/type_bit_iuds.result296
-rw-r--r--mysql-test/suite/engines/iuds/t/type_bit_iuds.test64
-rw-r--r--mysql-test/suite/federated/federated_server.result10
-rw-r--r--mysql-test/suite/federated/federated_server.test4
-rw-r--r--mysql-test/suite/federated/federatedx.result16
-rw-r--r--mysql-test/suite/federated/federatedx.test22
-rw-r--r--mysql-test/suite/federated/federatedx_create_handlers.result27
-rw-r--r--mysql-test/suite/federated/federatedx_create_handlers.test4
-rw-r--r--mysql-test/suite/funcs_1/r/innodb_trig_03e.result4
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_is.result26
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_is_embedded.result26
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_is.result50
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_is_embedded.result50
-rw-r--r--mysql-test/suite/funcs_1/r/memory_trig_03e.result4
-rw-r--r--mysql-test/suite/funcs_1/r/myisam_trig_03e.result4
-rw-r--r--mysql-test/suite/galera/r/galera_event_node_evict.result21
-rw-r--r--mysql-test/suite/galera/r/galera_ist_MDEV-28423,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_ist_MDEV-28583,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_many_indexes.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_force_recovery,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff4
-rw-r--r--mysql-test/suite/galera/t/galera_event_node_evict.cnf14
-rw-r--r--mysql-test/suite/galera/t/galera_event_node_evict.test96
-rw-r--r--mysql-test/suite/galera/t/galera_ist_MDEV-28423.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_ist_MDEV-28583.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_load_data.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_encrypted.cnf1
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf1
-rw-r--r--mysql-test/suite/gcol/inc/gcol_keys.inc6
-rw-r--r--mysql-test/suite/gcol/inc/gcol_select.inc8
-rw-r--r--mysql-test/suite/gcol/r/gcol_bugfixes.result4
-rw-r--r--mysql-test/suite/gcol/r/gcol_ins_upd_innodb.result2
-rw-r--r--mysql-test/suite/gcol/r/gcol_keys_innodb.result6
-rw-r--r--mysql-test/suite/gcol/r/gcol_keys_myisam.result6
-rw-r--r--mysql-test/suite/gcol/r/gcol_select_innodb.result30
-rw-r--r--mysql-test/suite/gcol/r/gcol_select_myisam.result38
-rw-r--r--mysql-test/suite/handler/aria.result2
-rw-r--r--mysql-test/suite/handler/heap.result2
-rw-r--r--mysql-test/suite/handler/innodb.result2
-rw-r--r--mysql-test/suite/handler/myisam.result2
-rw-r--r--mysql-test/suite/heap/heap_btree.result4
-rw-r--r--mysql-test/suite/heap/heap_btree.test2
-rw-r--r--mysql-test/suite/innodb/include/innodb_bulk_create_index.inc8
-rw-r--r--mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc8
-rw-r--r--mysql-test/suite/innodb/include/innodb_merge_threshold_delete.inc2
-rw-r--r--mysql-test/suite/innodb/include/innodb_merge_threshold_secondary.inc2
-rw-r--r--mysql-test/suite/innodb/include/innodb_merge_threshold_update.inc2
-rw-r--r--mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/alter_algorithm,NOCOPY.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/alter_kill.result2
-rw-r--r--mysql-test/suite/innodb/r/alter_missing_tablespace.result1
-rw-r--r--mysql-test/suite/innodb/r/alter_rename_existing.result6
-rw-r--r--mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/defrag_mdl-9155.result4
-rw-r--r--mysql-test/suite/innodb/r/dropdb_cs.result4
-rw-r--r--mysql-test/suite/innodb/r/gap_locks.result2
-rw-r--r--mysql-test/suite/innodb/r/ibuf_delete.result53
-rw-r--r--mysql-test/suite/innodb/r/ibuf_not_empty.result20
-rw-r--r--mysql-test/suite/innodb/r/index_tree_operation.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-fkcheck.result7
-rw-r--r--mysql-test/suite/innodb/r/innodb-index-online.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb-index.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb-isolation.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb-system-table-view.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb-table-online.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5522-debug.result11
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5522.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5980-alter.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb.result20
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug14147491.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug30423.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug30919.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug51920.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug56947.result9
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug57252.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug59733.result18
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug68148.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bulk_create_index.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb_defrag_concurrent.result8
-rw-r--r--mysql-test/suite/innodb/r/innodb_defrag_stats.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_defrag_stats_many_tables.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_defragment.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_defragment_small.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_mysql.result85
-rw-r--r--mysql-test/suite/innodb/r/innodb_scrub.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result16
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats.result8
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_fetch.result8
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_status_variables.result10
-rw-r--r--mysql-test/suite/innodb/r/insert_debug.result2
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_debug.result4
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,16k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/log_data_file_size.result4
-rw-r--r--mysql-test/suite/innodb/r/log_file_name.result1
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,16k,compact,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,16k,dynamic,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,16k,innodb,redundant.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,32k,compact,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,32k,dynamic,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,32k,innodb,redundant.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,4k,compact,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,4k,dynamic,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,4k,innodb,redundant.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,64k,compact,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,64k,dynamic,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,64k,innodb,redundant.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,8k,compact,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/max_record_size,8k,dynamic,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/mdev-14846.result14
-rw-r--r--mysql-test/suite/innodb/r/mdev-15707.result24
-rw-r--r--mysql-test/suite/innodb/r/monitor.result16
-rw-r--r--mysql-test/suite/innodb/r/mvcc.result3
-rw-r--r--mysql-test/suite/innodb/r/partition_locking.result2
-rw-r--r--mysql-test/suite/innodb/r/restart,16k,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/restart,32k,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/restart,4k,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/restart,64k,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/restart,8k,innodb.rdiff4
-rw-r--r--mysql-test/suite/innodb/r/row_format_redundant.result7
-rw-r--r--mysql-test/suite/innodb/r/table_flags.result1
-rw-r--r--mysql-test/suite/innodb/r/table_index_statistics.result3
-rw-r--r--mysql-test/suite/innodb/t/alter_kill.test3
-rw-r--r--mysql-test/suite/innodb/t/alter_missing_tablespace.test1
-rw-r--r--mysql-test/suite/innodb/t/encryption_threads_shutdown.test13
-rw-r--r--mysql-test/suite/innodb/t/gap_locks.test2
-rw-r--r--mysql-test/suite/innodb/t/ibuf_delete.test67
-rw-r--r--mysql-test/suite/innodb/t/ibuf_not_empty.combinations9
-rw-r--r--mysql-test/suite/innodb/t/ibuf_not_empty.test117
-rw-r--r--mysql-test/suite/innodb/t/index_tree_operation.test10
-rw-r--r--mysql-test/suite/innodb/t/innodb-bug-14068765.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb-bug-14084530.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb-enlarge-blob.opt1
-rw-r--r--mysql-test/suite/innodb/t/innodb-fkcheck.test23
-rw-r--r--mysql-test/suite/innodb/t/innodb-index-online.test5
-rw-r--r--mysql-test/suite/innodb/t/innodb-table-online.test5
-rw-r--r--mysql-test/suite/innodb/t/innodb-wl5522-debug.test25
-rw-r--r--mysql-test/suite/innodb/t/innodb-wl5522.test9
-rw-r--r--mysql-test/suite/innodb/t/innodb-wl5980-alter.test4
-rw-r--r--mysql-test/suite/innodb/t/innodb.opt1
-rw-r--r--mysql-test/suite/innodb/t/innodb.test6
-rw-r--r--mysql-test/suite/innodb/t/innodb_buffer_pool_load_now.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug14147491-master.opt1
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug14147491.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug39438-master.opt1
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug39438.test4
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug56947.test5
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug59733.test53
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug68148.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test7
-rw-r--r--mysql-test/suite/innodb/t/innodb_defragment.opt3
-rw-r--r--mysql-test/suite/innodb/t/insert_debug.test2
-rw-r--r--mysql-test/suite/innodb/t/log_corruption.test26
-rw-r--r--mysql-test/suite/innodb/t/log_data_file_size.opt1
-rw-r--r--mysql-test/suite/innodb/t/log_file_name.test1
-rw-r--r--mysql-test/suite/innodb/t/log_upgrade.test19
-rw-r--r--mysql-test/suite/innodb/t/mdev-14846.test6
-rw-r--r--mysql-test/suite/innodb/t/mdev-15707.opt1
-rw-r--r--mysql-test/suite/innodb/t/mdev-15707.test30
-rw-r--r--mysql-test/suite/innodb/t/monitor.test2
-rw-r--r--mysql-test/suite/innodb/t/mvcc.test5
-rw-r--r--mysql-test/suite/innodb/t/partition_locking.test2
-rw-r--r--mysql-test/suite/innodb/t/row_format_redundant.opt1
-rw-r--r--mysql-test/suite/innodb/t/row_format_redundant.test8
-rw-r--r--mysql-test/suite/innodb/t/table_flags.opt2
-rw-r--r--mysql-test/suite/innodb/t/table_flags.test1
-rw-r--r--mysql-test/suite/innodb_fts/r/crash_recovery.result2
-rw-r--r--mysql-test/suite/innodb_fts/r/fulltext.result11
-rw-r--r--mysql-test/suite/innodb_fts/r/fulltext_misc.result20
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result5
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result3
-rw-r--r--mysql-test/suite/innodb_fts/t/fulltext.test2
-rw-r--r--mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test11
-rw-r--r--mysql-test/suite/innodb_fts/t/innodb_fts_proximity.test12
-rw-r--r--mysql-test/suite/innodb_gis/r/create_spatial_index.result166
-rw-r--r--mysql-test/suite/innodb_gis/r/rtree.result8
-rw-r--r--mysql-test/suite/innodb_gis/r/rtree_multi_pk.result8
-rw-r--r--mysql-test/suite/innodb_gis/t/create_spatial_index.test97
-rw-r--r--mysql-test/suite/innodb_gis/t/rtree_multi_pk.test1
-rw-r--r--mysql-test/suite/innodb_zip/r/bug36169.result2
-rw-r--r--mysql-test/suite/innodb_zip/r/bug53591.result7
-rw-r--r--mysql-test/suite/innodb_zip/r/bug56680.result1
-rw-r--r--mysql-test/suite/innodb_zip/r/cmp_drop_table.result1
-rw-r--r--mysql-test/suite/innodb_zip/r/create_options.result17
-rw-r--r--mysql-test/suite/innodb_zip/r/index_large_prefix.result2
-rw-r--r--mysql-test/suite/innodb_zip/r/innochecksum_2.result3
-rw-r--r--mysql-test/suite/innodb_zip/r/innodb-zip.result12
-rw-r--r--mysql-test/suite/innodb_zip/r/large_blob.result5
-rw-r--r--mysql-test/suite/innodb_zip/r/page_size,4k.rdiff24
-rw-r--r--mysql-test/suite/innodb_zip/r/page_size,8k.rdiff24
-rw-r--r--mysql-test/suite/innodb_zip/r/page_size.result7
-rw-r--r--mysql-test/suite/innodb_zip/r/restart.result34
-rw-r--r--mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result5
-rw-r--r--mysql-test/suite/innodb_zip/r/wl5522_zip.result4
-rw-r--r--mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result16
-rw-r--r--mysql-test/suite/innodb_zip/t/bug36169.test7
-rw-r--r--mysql-test/suite/innodb_zip/t/bug53591.test10
-rw-r--r--mysql-test/suite/innodb_zip/t/bug56680.test1
-rw-r--r--mysql-test/suite/innodb_zip/t/cmp_drop_table.test11
-rw-r--r--mysql-test/suite/innodb_zip/t/create_options.test9
-rw-r--r--mysql-test/suite/innodb_zip/t/index_large_prefix.test6
-rw-r--r--mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test6
-rw-r--r--mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test6
-rw-r--r--mysql-test/suite/innodb_zip/t/innochecksum_2.test4
-rw-r--r--mysql-test/suite/innodb_zip/t/innodb-zip.test8
-rw-r--r--mysql-test/suite/innodb_zip/t/large_blob.test9
-rw-r--r--mysql-test/suite/innodb_zip/t/page_size.test28
-rw-r--r--mysql-test/suite/innodb_zip/t/restart.test13
-rw-r--r--mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test15
-rw-r--r--mysql-test/suite/innodb_zip/t/wl5522_zip.test8
-rw-r--r--mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test16
-rw-r--r--mysql-test/suite/json/r/json_table.result3
-rw-r--r--mysql-test/suite/json/r/json_table_mysql.result6
-rw-r--r--mysql-test/suite/json/t/json_table.test2
-rw-r--r--mysql-test/suite/json/t/json_table_mysql.test1
-rw-r--r--mysql-test/suite/maria/crash-recursive.result53
-rw-r--r--mysql-test/suite/maria/crash-recursive.test67
-rw-r--r--mysql-test/suite/maria/icp.result28
-rw-r--r--mysql-test/suite/maria/mrr.result12
-rw-r--r--mysql-test/suite/mariabackup/mdev-14447.result2
-rw-r--r--mysql-test/suite/mariabackup/xb_compressed_encrypted.opt1
-rw-r--r--mysql-test/suite/mariabackup/xb_fulltext_encrypted.opt1
-rw-r--r--mysql-test/suite/mtr/t/simple,c2,s1.rdiff4
-rw-r--r--mysql-test/suite/mtr/t/simple,s2,c2.rdiff4
-rw-r--r--mysql-test/suite/parts/inc/partition_decimal.inc4
-rw-r--r--mysql-test/suite/parts/inc/partition_double.inc4
-rw-r--r--mysql-test/suite/parts/inc/partition_key_16col.inc1
-rw-r--r--mysql-test/suite/parts/inc/partition_key_32col.inc1
-rw-r--r--mysql-test/suite/parts/inc/partition_key_4col.inc1
-rw-r--r--mysql-test/suite/parts/inc/partition_key_8col.inc1
-rw-r--r--mysql-test/suite/parts/inc/partition_time.inc2
-rw-r--r--mysql-test/suite/parts/inc/partition_timestamp.inc5
-rw-r--r--mysql-test/suite/parts/r/alter_data_directory_innodb.result4
-rw-r--r--mysql-test/suite/parts/r/longname.result4
-rw-r--r--mysql-test/suite/parts/r/optimizer.result8
-rw-r--r--mysql-test/suite/parts/r/partition_basic_symlink_innodb.result8
-rw-r--r--mysql-test/suite/parts/r/partition_char_innodb.result bin 50530 -> 50530 bytes
-rw-r--r--mysql-test/suite/parts/r/partition_datetime_innodb.result966
-rw-r--r--mysql-test/suite/parts/r/partition_datetime_myisam.result122
-rw-r--r--mysql-test/suite/parts/r/partition_decimal_innodb.result28
-rw-r--r--mysql-test/suite/parts/r/partition_decimal_myisam.result28
-rw-r--r--mysql-test/suite/parts/r/partition_double_innodb.result16
-rw-r--r--mysql-test/suite/parts/r/partition_double_myisam.result16
-rw-r--r--mysql-test/suite/parts/r/partition_float_innodb.result10
-rw-r--r--mysql-test/suite/parts/r/partition_special_innodb.result8
-rw-r--r--mysql-test/suite/parts/r/partition_special_myisam.result8
-rw-r--r--mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result12
-rw-r--r--mysql-test/suite/parts/r/reorganize_partition_innodb.result4
-rw-r--r--mysql-test/suite/parts/t/debug_innodb_crash-master.opt2
-rw-r--r--mysql-test/suite/parts/t/partition_debug_sync_innodb-master.opt1
-rw-r--r--mysql-test/suite/perfschema/include/upgrade_check.inc2
-rw-r--r--mysql-test/suite/perfschema/r/alter_table_progress.result2
-rw-r--r--mysql-test/suite/perfschema/r/batch_table_io_func.result11
-rw-r--r--mysql-test/suite/perfschema/r/dml_handler.result2
-rw-r--r--mysql-test/suite/perfschema/r/ortho_iter.result4
-rw-r--r--mysql-test/suite/perfschema/r/rpl_threads.result6
-rw-r--r--mysql-test/suite/perfschema/r/selects.result2
-rw-r--r--mysql-test/suite/perfschema/t/show_sanity.test3
-rw-r--r--mysql-test/suite/period/r/delete,myisam.rdiff4
-rw-r--r--mysql-test/suite/roles/admin.result4
-rw-r--r--mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test4
-rw-r--r--mysql-test/suite/roles/definer.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_cross_version.result22
-rw-r--r--mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.rdiff4
-rw-r--r--mysql-test/suite/rpl/r/rpl_drop_db.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_innodb_bug28430.result12
-rw-r--r--mysql-test/suite/rpl/r/rpl_innodb_bug30888.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff4
-rw-r--r--mysql-test/suite/rpl/r/rpl_iodku,stmt.rdiff4
-rw-r--r--mysql-test/suite/rpl/r/rpl_mdev12179.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_misc_functions.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_old_master_29078.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_parallel_29322.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff4
-rw-r--r--mysql-test/suite/rpl/r/rpl_skip_replication.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_temporary.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_timestamp.result2
-rw-r--r--mysql-test/suite/rpl/t/rpl_cross_version-master.opt1
-rw-r--r--mysql-test/suite/rpl/t/rpl_cross_version.test48
-rw-r--r--mysql-test/suite/rpl/t/rpl_mysql_upgrade.test2
-rw-r--r--mysql-test/suite/sys_vars/inc/explicit_defaults_for_timestamp.inc2
-rw-r--r--mysql-test/suite/sys_vars/inc/sysvar_global_and_session_grant.inc17
-rw-r--r--mysql-test/suite/sys_vars/inc/sysvar_global_grant.inc20
-rw-r--r--mysql-test/suite/sys_vars/inc/sysvar_global_grant_alone.inc16
-rw-r--r--mysql-test/suite/sys_vars/inc/sysvar_session_grant.inc18
-rw-r--r--mysql-test/suite/sys_vars/inc/sysvar_session_grant_alone.inc16
-rw-r--r--mysql-test/suite/sys_vars/r/aria_sort_buffer_size_basic,32bit.rdiff4
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_annotate_row_events_grant.result21
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_cache_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_commit_wait_count_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_commit_wait_usec_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_direct_non_transactional_updates_grant.result21
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_expire_logs_seconds_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_file_cache_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_format_grant.result21
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_row_image_grant.result21
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_row_metadata_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/binlog_stmt_cache_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/connect_timeout_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/disconnect_on_expired_password_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/expire_logs_days_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_off.result4
-rw-r--r--mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_on.result2
-rw-r--r--mysql-test/suite/sys_vars/r/extra_max_connections_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_binlog_state_grant.result20
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_cleanup_batch_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_domain_id_grant.result33
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_ignore_duplicates_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_pos_auto_engines_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_seq_no_grant.result20
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_slave_pos_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/gtid_strict_mode_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/histogram_type_basic.result2
-rw-r--r--mysql-test/suite/sys_vars/r/init_connect_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/init_slave_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_change_buffer_max_size_basic.result77
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result73
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_change_buffering_debug_basic.result67
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_defragment_basic.result6
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_n_recs_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_defragment_frequency_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_defragment_n_pages_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_defragment_stats_accuracy_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result2
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit,32bit.rdiff4
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result2
-rw-r--r--mysql-test/suite/sys_vars/r/log_bin_compress_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/log_bin_compress_min_len_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/log_bin_trust_function_creators_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/log_slow_admin_statements_func.result12
-rw-r--r--mysql-test/suite/sys_vars/r/master_verify_checksum_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/max_binlog_cache_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/max_binlog_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/max_binlog_stmt_cache_size_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/max_connect_errors_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/max_connections_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/max_join_size_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/max_join_size_func.result6
-rw-r--r--mysql-test/suite/sys_vars/r/max_password_errors_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/preudo_thread_id_grant.result20
-rw-r--r--mysql-test/suite/sys_vars/r/proxy_protocol_networks_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/read_binlog_speed_limit_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/read_only_grant.result2
-rw-r--r--mysql-test/suite/sys_vars/r/relay_log_purge_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/relay_log_recovery_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_do_db_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_do_table_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_events_marked_for_skip_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_ignore_db_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_ignore_table_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_wild_do_table_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/replicate_wild_ignore_table_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_master_enabled_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_master_timeout_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_master_trace_level_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_no_slave_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_point_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_delay_master_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_enabled_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_kill_conn_timeout_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_trace_level_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/secure_auth_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/secure_file_priv.result2
-rw-r--r--mysql-test/suite/sys_vars/r/server_id_grant.result33
-rw-r--r--mysql-test/suite/sys_vars/r/slave_compressed_protocol_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_ddl_exec_mode_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_domain_parallel_threads_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_exec_mode_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_max_allowed_packet_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_net_timeout_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_parallel_max_queued_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_parallel_mode_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_parallel_threads_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_parallel_workers_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_sql_verify_checksum_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_transaction_retry_interval_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slave_type_conversions_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/slow_launch_time_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/sql_big_selects_func.result2
-rw-r--r--mysql-test/suite/sys_vars/r/sql_log_bin_grant.result22
-rw-r--r--mysql-test/suite/sys_vars/r/sync_binlog_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/sync_master_info_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/sync_relay_log_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/sync_relay_log_info_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_aria,32bit.rdiff4
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff4
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb.result86
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_embedded.result142
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result142
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_star.result2
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_wsrep,32bit.rdiff4
-rw-r--r--mysql-test/suite/sys_vars/r/thread_pool_idle_timeout_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/thread_pool_max_threads_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/thread_pool_oversubscribe_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/r/thread_pool_stall_limit_grant.result19
-rw-r--r--mysql-test/suite/sys_vars/t/binlog_direct_non_transactional_updates_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/binlog_format_grant.test18
-rw-r--r--mysql-test/suite/sys_vars/t/connect_timeout_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/disconnect_on_expired_password_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/extra_max_connections_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/gtid_binlog_state_grant.test23
-rw-r--r--mysql-test/suite/sys_vars/t/init_connect_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test71
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test65
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test59
-rw-r--r--mysql-test/suite/sys_vars/t/max_connect_errors_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/max_connections_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/max_join_size_basic.test2
-rw-r--r--mysql-test/suite/sys_vars/t/max_join_size_func.test4
-rw-r--r--mysql-test/suite/sys_vars/t/max_password_errors_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/proxy_protocol_networks_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/secure_auth_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/slow_launch_time_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/sql_big_selects_func.test4
-rw-r--r--mysql-test/suite/sys_vars/t/sql_log_bin_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/thread_pool_idle_timeout_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/thread_pool_max_threads_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/thread_pool_oversubscribe_grant.test19
-rw-r--r--mysql-test/suite/sys_vars/t/thread_pool_stall_limit_grant.test19
-rw-r--r--mysql-test/suite/sysschema/r/all_sys_objects_exist.result3
-rw-r--r--mysql-test/suite/sysschema/r/optimizer_switch.result40
-rw-r--r--mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result2
-rw-r--r--mysql-test/suite/sysschema/t/mysqldump.test2
-rw-r--r--mysql-test/suite/sysschema/t/optimizer_switch.test2
-rw-r--r--mysql-test/suite/sysschema/t/pr_statement_performance_analyzer.test5
-rw-r--r--mysql-test/suite/sysschema/t/v_schema_redundant_indexes.test1
-rw-r--r--mysql-test/suite/vcol/inc/vcol_ins_upd.inc3
-rw-r--r--mysql-test/suite/vcol/r/vcol_ins_upd_innodb.result1
-rw-r--r--mysql-test/suite/vcol/r/vcol_ins_upd_myisam.result1
-rw-r--r--mysql-test/suite/vcol/r/vcol_select_innodb.result2
-rw-r--r--mysql-test/suite/vcol/r/vcol_select_myisam.result4
-rw-r--r--mysql-test/suite/versioning/r/alter.result2
-rw-r--r--mysql-test/suite/versioning/r/commit_id.result14
-rw-r--r--mysql-test/suite/versioning/r/create.result12
-rw-r--r--mysql-test/suite/versioning/r/cte.result8
-rw-r--r--mysql-test/suite/versioning/r/foreign.result2
-rw-r--r--mysql-test/suite/versioning/r/insert.result2
-rw-r--r--mysql-test/suite/versioning/r/load_data.result2
-rw-r--r--mysql-test/suite/versioning/r/partition.result16
-rw-r--r--mysql-test/suite/versioning/r/select,trx_id.rdiff11
-rw-r--r--mysql-test/suite/versioning/r/select.result8
-rw-r--r--mysql-test/suite/versioning/r/select2,trx_id.rdiff16
-rw-r--r--mysql-test/suite/versioning/r/select2.result4
-rw-r--r--mysql-test/suite/versioning/r/trx_id.result16
-rw-r--r--mysql-test/suite/versioning/r/update,trx_id.rdiff4
-rw-r--r--mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff4
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_provider_plugin.result24
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_provider_plugin_basic.result66
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_provider_plugin_defaults.result1270
-rw-r--r--mysql-test/suite/wsrep/r/wsrep_provider_plugin_wsrep_off.result5
-rw-r--r--mysql-test/suite/wsrep/t/variables_debug.test2
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin.cnf8
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin.test41
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.cnf8
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.test77
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.cnf8
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.test30
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.cnf12
-rw-r--r--mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.test6
-rw-r--r--mysys/array.c43
-rw-r--r--mysys/errors.c2
-rw-r--r--mysys/mf_radix.c2
-rw-r--r--mysys/mf_sort.c2
-rw-r--r--mysys/my_getopt.c2
-rw-r--r--mysys/my_init.c32
-rw-r--r--mysys/my_symlink.c4
-rw-r--r--mysys/my_winfile.c63
-rw-r--r--plugin/aws_key_management/aws_key_management_plugin.cc35
-rw-r--r--plugin/hashicorp_key_management/mysql-test/vault/t/hashicorp_key_rotation_age.test8
-rw-r--r--plugin/type_inet/mysql-test/type_inet/type_inet4.result2
-rw-r--r--plugin/type_inet/mysql-test/type_inet/type_inet6.result2
-rw-r--r--plugin/type_uuid/mysql-test/type_uuid/type_uuid.result2
-rw-r--r--scripts/CMakeLists.txt4
-rwxr-xr-xscripts/convert-debug-for-diff.sh26
-rw-r--r--scripts/fill_help_tables.sql2
-rw-r--r--scripts/mysql_convert_table_format.sh3
-rw-r--r--scripts/mysql_find_rows.sh3
-rw-r--r--scripts/mysql_fix_extensions.sh2
-rw-r--r--scripts/mysql_install_db.sh16
-rw-r--r--scripts/mysql_secure_installation.sh6
-rw-r--r--scripts/mysql_setpermission.sh3
-rw-r--r--scripts/mysqlaccess.sh2
-rw-r--r--scripts/mysqld_multi.sh2
-rw-r--r--scripts/mysqld_safe.sh6
-rw-r--r--scripts/mysqldumpslow.sh3
-rw-r--r--scripts/mysqlhotcopy.sh3
-rw-r--r--scripts/sys_schema/CMakeLists.txt1
-rw-r--r--scripts/sys_schema/procedures/optimizer_switch.sql69
-rw-r--r--scripts/wsrep_sst_mysqldump.sh2
-rw-r--r--sql-common/client.c5
-rw-r--r--sql/CMakeLists.txt6
-rw-r--r--sql/ddl_log.cc2
-rw-r--r--sql/debug.cc2
-rw-r--r--sql/debug.h1
-rw-r--r--sql/events.cc2
-rw-r--r--sql/filesort.cc380
-rw-r--r--sql/filesort_utils.cc337
-rw-r--r--sql/filesort_utils.h85
-rw-r--r--sql/ha_partition.cc156
-rw-r--r--sql/ha_partition.h17
-rw-r--r--sql/handler.cc264
-rw-r--r--sql/handler.h535
-rw-r--r--sql/item.cc53
-rw-r--r--sql/item.h3
-rw-r--r--sql/item_subselect.cc65
-rw-r--r--sql/json_table.cc5
-rw-r--r--sql/keycaches.cc141
-rw-r--r--sql/keycaches.h2
-rw-r--r--sql/log_event.cc409
-rw-r--r--sql/log_event.h626
-rw-r--r--sql/log_event_client.cc202
-rw-r--r--sql/log_event_old.cc2749
-rw-r--r--sql/log_event_old.h569
-rw-r--r--sql/log_event_server.cc1086
-rw-r--r--sql/multi_range_read.cc250
-rw-r--r--sql/multi_range_read.h2
-rw-r--r--sql/my_json_writer.h2
-rw-r--r--sql/mysqld.cc150
-rw-r--r--sql/mysqld.h18
-rw-r--r--sql/opt_index_cond_pushdown.cc13
-rw-r--r--sql/opt_range.cc1409
-rw-r--r--sql/opt_range.h9
-rw-r--r--sql/opt_split.cc128
-rw-r--r--sql/opt_subselect.cc563
-rw-r--r--sql/opt_subselect.h24
-rw-r--r--sql/opt_trace.cc122
-rw-r--r--sql/opt_trace.h6
-rw-r--r--sql/optimizer_costs.h162
-rw-r--r--sql/optimizer_defaults.h190
-rw-r--r--sql/privilege.h227
-rw-r--r--sql/records.cc23
-rw-r--r--sql/rowid_filter.cc221
-rw-r--r--sql/rowid_filter.h141
-rw-r--r--sql/rpl_record_old.cc199
-rw-r--r--sql/rpl_record_old.h35
-rw-r--r--sql/rpl_rli.cc17
-rw-r--r--sql/set_var.cc3
-rw-r--r--sql/set_var.h5
-rw-r--r--sql/share/errmsg-utf8.txt7
-rw-r--r--sql/slave.cc401
-rw-r--r--sql/sql_acl.cc41
-rw-r--r--sql/sql_analyze_stmt.h3
-rw-r--r--sql/sql_base.cc1
-rw-r--r--sql/sql_base.h2
-rw-r--r--sql/sql_bitmap.h12
-rw-r--r--sql/sql_class.cc1
-rw-r--r--sql/sql_class.h138
-rw-r--r--sql/sql_const.h61
-rw-r--r--sql/sql_delete.cc4
-rw-r--r--sql/sql_derived.cc24
-rw-r--r--sql/sql_explain.cc31
-rw-r--r--sql/sql_explain.h14
-rw-r--r--sql/sql_handler.cc5
-rw-r--r--sql/sql_help.cc23
-rw-r--r--sql/sql_join_cache.cc20
-rw-r--r--sql/sql_lex.cc15
-rw-r--r--sql/sql_load.cc82
-rw-r--r--sql/sql_parse.cc11
-rw-r--r--sql/sql_plugin.cc3
-rw-r--r--sql/sql_reload.cc2
-rw-r--r--sql/sql_repl.cc6
-rw-r--r--sql/sql_select.cc3764
-rw-r--r--sql/sql_select.h156
-rw-r--r--sql/sql_servers.cc2
-rw-r--r--sql/sql_show.cc75
-rw-r--r--sql/sql_sort.h29
-rw-r--r--sql/sql_statistics.cc9
-rw-r--r--sql/sql_string.cc3
-rw-r--r--sql/sql_table.cc3
-rw-r--r--sql/sql_test.cc21
-rw-r--r--sql/sql_tvc.cc29
-rw-r--r--sql/sql_udf.cc2
-rw-r--r--sql/sql_union.cc44
-rw-r--r--sql/sql_update.cc17
-rw-r--r--sql/sql_view.cc10
-rw-r--r--sql/sql_window.cc10
-rw-r--r--sql/sql_yacc.yy11
-rw-r--r--sql/structs.h30
-rw-r--r--sql/sys_vars.cc165
-rw-r--r--sql/sys_vars.inl138
-rw-r--r--sql/table.cc447
-rw-r--r--sql/table.h84
-rw-r--r--sql/tztime.cc12
-rw-r--r--sql/uniques.cc43
-rw-r--r--sql/uniques.h4
-rw-r--r--sql/winservice.c12
-rw-r--r--sql/wsrep_check_opts.cc2
-rw-r--r--sql/wsrep_event_service.cc23
-rw-r--r--sql/wsrep_event_service.h49
-rw-r--r--sql/wsrep_mysqld.cc25
-rw-r--r--sql/wsrep_plugin.cc326
-rw-r--r--sql/wsrep_plugin.h36
-rw-r--r--sql/wsrep_server_state.cc59
-rw-r--r--sql/wsrep_server_state.h18
-rw-r--r--sql/wsrep_sst.cc3
-rw-r--r--sql/wsrep_status.h7
-rw-r--r--sql/wsrep_var.cc29
-rw-r--r--sql/wsrep_var.h3
-rw-r--r--storage/archive/archive_reader.c8
-rw-r--r--storage/archive/ha_archive.cc60
-rw-r--r--storage/archive/ha_archive.h4
-rw-r--r--storage/blackhole/ha_blackhole.cc11
-rw-r--r--storage/columnstore/CMakeLists.txt6
m---------storage/columnstore/columnstore0
-rw-r--r--storage/connect/ha_connect.cc10
-rw-r--r--storage/connect/ha_connect.h14
-rw-r--r--storage/connect/mysql-test/connect/r/index.result29
-rw-r--r--storage/connect/mysql-test/connect/r/mysql_index.result13
-rw-r--r--storage/connect/mysql-test/connect/t/index.test7
-rw-r--r--storage/connect/mysql-test/connect/t/mysql_index.test3
-rw-r--r--storage/connect/tabext.cpp2
-rw-r--r--storage/csv/ha_tina.h7
-rw-r--r--storage/example/ha_example.h33
-rw-r--r--storage/federated/ha_federated.cc22
-rw-r--r--storage/federated/ha_federated.h37
-rw-r--r--storage/federatedx/ha_federatedx.cc21
-rw-r--r--storage/federatedx/ha_federatedx.h31
-rw-r--r--storage/heap/ha_heap.cc61
-rw-r--r--storage/heap/ha_heap.h114
-rw-r--r--storage/innobase/CMakeLists.txt3
-rw-r--r--storage/innobase/btr/btr0btr.cc489
-rw-r--r--storage/innobase/btr/btr0bulk.cc12
-rw-r--r--storage/innobase/btr/btr0cur.cc556
-rw-r--r--storage/innobase/btr/btr0defragment.cc42
-rw-r--r--storage/innobase/btr/btr0pcur.cc24
-rw-r--r--storage/innobase/btr/btr0sea.cc18
-rw-r--r--storage/innobase/buf/buf0buddy.cc4
-rw-r--r--storage/innobase/buf/buf0buf.cc509
-rw-r--r--storage/innobase/buf/buf0flu.cc29
-rw-r--r--storage/innobase/buf/buf0lru.cc17
-rw-r--r--storage/innobase/buf/buf0rea.cc377
-rw-r--r--storage/innobase/data/data0type.cc9
-rw-r--r--storage/innobase/dict/dict0boot.cc41
-rw-r--r--storage/innobase/dict/dict0defrag_bg.cc7
-rw-r--r--storage/innobase/dict/dict0dict.cc38
-rw-r--r--storage/innobase/dict/dict0load.cc16
-rw-r--r--storage/innobase/dict/dict0stats.cc33
-rw-r--r--storage/innobase/fil/fil0fil.cc147
-rw-r--r--storage/innobase/fil/fil0pagecompress.cc3
-rw-r--r--storage/innobase/fsp/fsp0fsp.cc25
-rw-r--r--storage/innobase/gis/gis0rtree.cc276
-rw-r--r--storage/innobase/gis/gis0sea.cc83
-rw-r--r--storage/innobase/handler/ha_innodb.cc334
-rw-r--r--storage/innobase/handler/ha_innodb.h9
-rw-r--r--storage/innobase/handler/handler0alter.cc4
-rw-r--r--storage/innobase/handler/i_s.cc42
-rw-r--r--storage/innobase/ibuf/ibuf0ibuf.cc4656
-rw-r--r--storage/innobase/include/btr0btr.h40
-rw-r--r--storage/innobase/include/btr0cur.h35
-rw-r--r--storage/innobase/include/btr0types.h45
-rw-r--r--storage/innobase/include/buf0buf.h142
-rw-r--r--storage/innobase/include/buf0buf.inl2
-rw-r--r--storage/innobase/include/buf0lru.h20
-rw-r--r--storage/innobase/include/buf0rea.h48
-rw-r--r--storage/innobase/include/data0type.h57
-rw-r--r--storage/innobase/include/data0type.inl122
-rw-r--r--storage/innobase/include/dict0boot.h35
-rw-r--r--storage/innobase/include/dict0dict.h23
-rw-r--r--storage/innobase/include/dict0dict.inl2
-rw-r--r--storage/innobase/include/dict0load.h8
-rw-r--r--storage/innobase/include/dict0mem.h20
-rw-r--r--storage/innobase/include/dict0types.h13
-rw-r--r--storage/innobase/include/fil0fil.h58
-rw-r--r--storage/innobase/include/fsp0types.h16
-rw-r--r--storage/innobase/include/gis0rtree.h65
-rw-r--r--storage/innobase/include/gis0rtree.inl5
-rw-r--r--storage/innobase/include/ibuf0ibuf.h457
-rw-r--r--storage/innobase/include/ibuf0ibuf.inl282
-rw-r--r--storage/innobase/include/log0log.h4
-rw-r--r--storage/innobase/include/log0recv.h12
-rw-r--r--storage/innobase/include/mtr0mtr.h13
-rw-r--r--storage/innobase/include/page0cur.h12
-rw-r--r--storage/innobase/include/page0cur.inl7
-rw-r--r--storage/innobase/include/page0page.h19
-rw-r--r--storage/innobase/include/page0zip.h10
-rw-r--r--storage/innobase/include/page0zip.inl4
-rw-r--r--storage/innobase/include/rem0rec.inl6
-rw-r--r--storage/innobase/include/row0purge.h35
-rw-r--r--storage/innobase/include/row0row.h35
-rw-r--r--storage/innobase/include/srv0mon.h21
-rw-r--r--storage/innobase/include/srv0srv.h15
-rw-r--r--storage/innobase/include/sux_lock.h4
-rw-r--r--storage/innobase/include/trx0trx.h25
-rw-r--r--storage/innobase/include/trx0undo.h6
-rw-r--r--storage/innobase/include/univ.i6
-rw-r--r--storage/innobase/log/log0log.cc48
-rw-r--r--storage/innobase/log/log0recv.cc168
-rw-r--r--storage/innobase/mtr/mtr0mtr.cc5
-rw-r--r--storage/innobase/os/os0file.cc214
-rw-r--r--storage/innobase/page/page0cur.cc16
-rw-r--r--storage/innobase/page/page0page.cc39
-rw-r--r--storage/innobase/page/page0zip.cc14
-rw-r--r--storage/innobase/rem/rem0cmp.cc46
-rw-r--r--storage/innobase/rem/rem0rec.cc9
-rw-r--r--storage/innobase/row/row0import.cc89
-rw-r--r--storage/innobase/row/row0ins.cc63
-rw-r--r--storage/innobase/row/row0log.cc62
-rw-r--r--storage/innobase/row/row0merge.cc12
-rw-r--r--storage/innobase/row/row0mysql.cc15
-rw-r--r--storage/innobase/row/row0purge.cc95
-rw-r--r--storage/innobase/row/row0quiesce.cc15
-rw-r--r--storage/innobase/row/row0row.cc77
-rw-r--r--storage/innobase/row/row0sel.cc14
-rw-r--r--storage/innobase/row/row0uins.cc27
-rw-r--r--storage/innobase/row/row0umod.cc49
-rw-r--r--storage/innobase/row/row0upd.cc41
-rw-r--r--storage/innobase/srv/srv0mon.cc112
-rw-r--r--storage/innobase/srv/srv0srv.cc76
-rw-r--r--storage/innobase/srv/srv0start.cc244
-rw-r--r--storage/innobase/trx/trx0purge.cc11
-rw-r--r--storage/innobase/trx/trx0rseg.cc8
-rw-r--r--storage/innobase/trx/trx0sys.cc10
-rw-r--r--storage/innobase/trx/trx0trx.cc12
-rw-r--r--storage/innobase/trx/trx0undo.cc9
-rw-r--r--storage/maria/CMakeLists.txt1
-rw-r--r--storage/maria/aria_chk.c12
-rw-r--r--storage/maria/aria_dump_log.c10
-rw-r--r--storage/maria/aria_pack.c8
-rw-r--r--storage/maria/aria_read_log.c13
-rw-r--r--storage/maria/aria_s3_copy.1 (renamed from man/aria_s3_copy.1)0
-rw-r--r--storage/maria/aria_s3_copy.cc8
-rw-r--r--storage/maria/ha_maria.cc103
-rw-r--r--storage/maria/ha_maria.h9
-rw-r--r--storage/maria/ma_bitmap.c4
-rw-r--r--storage/maria/ma_blockrec.c4
-rw-r--r--storage/maria/ma_check.c21
-rw-r--r--storage/maria/ma_control_file.c2
-rw-r--r--storage/maria/ma_control_file.h2
-rw-r--r--storage/maria/ma_extra.c9
-rw-r--r--storage/maria/ma_info.c8
-rw-r--r--storage/maria/ma_key.c40
-rw-r--r--storage/maria/ma_loghandler.c133
-rw-r--r--storage/maria/ma_loghandler.h4
-rw-r--r--storage/maria/ma_pagecache.c4
-rw-r--r--storage/maria/ma_recovery.c12
-rw-r--r--storage/maria/ma_recovery_util.c2
-rw-r--r--storage/maria/ma_rkey.c1
-rw-r--r--storage/maria/ma_scan.c4
-rw-r--r--storage/maria/ma_write.c9
-rw-r--r--storage/maria/maria_def.h39
-rw-r--r--storage/mroonga/ha_mroonga.cpp87
-rw-r--r--storage/mroonga/ha_mroonga.hpp20
-rw-r--r--storage/mroonga/mysql-test/mroonga/storage/r/optimization_count_skip_index_not_equal.result3
-rw-r--r--storage/mroonga/mysql-test/mroonga/storage/t/optimization_count_skip_index_not_equal.test1
-rw-r--r--storage/mroonga/mysql-test/mroonga/wrapper/r/geometry_contains.result2
-rw-r--r--storage/myisam/ha_myisam.cc63
-rw-r--r--storage/myisam/ha_myisam.h169
-rw-r--r--storage/myisam/mi_extra.c4
-rw-r--r--storage/myisam/mi_key.c42
-rw-r--r--storage/myisam/mi_rkey.c2
-rw-r--r--storage/myisam/mi_scan.c4
-rw-r--r--storage/myisam/myisamchk.c10
-rw-r--r--storage/myisam/myisamdef.h13
-rw-r--r--storage/myisam/myisamlog.c5
-rw-r--r--storage/myisam/myisampack.c9
-rw-r--r--storage/myisammrg/ha_myisammrg.cc35
-rw-r--r--storage/myisammrg/ha_myisammrg.h115
-rw-r--r--storage/oqgraph/ha_oqgraph.h7
-rw-r--r--storage/perfschema/ha_perfschema.h6
-rw-r--r--storage/rocksdb/CMakeLists.txt2
-rw-r--r--storage/rocksdb/ha_rocksdb.cc39
-rw-r--r--storage/rocksdb/ha_rocksdb.h16
-rw-r--r--storage/rocksdb/mariadb-ldb.1 (renamed from man/mysql_ldb.1)0
-rw-r--r--storage/rocksdb/myrocks_hotbackup.1 (renamed from man/myrocks_hotbackup.1)0
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result16
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result60
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result10
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/select.result6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test4
-rw-r--r--storage/rocksdb/tools/mysql_ldb.cc1
-rw-r--r--storage/sequence/mysql-test/sequence/group_by.result2
-rw-r--r--storage/sequence/sequence.cc66
-rw-r--r--storage/sphinx/ha_sphinx.h30
-rw-r--r--storage/spider/ha_spider.cc82
-rw-r--r--storage/spider/ha_spider.h13
-rw-r--r--storage/spider/mysql-test/spider/bg/r/spider_fixes.result1
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/sql_mode_init.inc16
-rw-r--r--storage/spider/mysql-test/spider/bugfix/r/quick_mode_1.result4
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/mdev_22246.test1
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/mdev_27172.test4
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/quick_mode_1.test1
-rw-r--r--storage/spider/mysql-test/spider/r/direct_left_join_nullable.result2
-rw-r--r--storage/spider/mysql-test/spider/r/direct_left_right_join_nullable.result2
-rw-r--r--storage/spider/mysql-test/spider/r/direct_right_join_nullable.result2
-rw-r--r--storage/spider/mysql-test/spider/r/direct_right_left_right_join_nullable.result2
-rw-r--r--storage/spider/mysql-test/spider/r/partition_mrr.result48
-rw-r--r--storage/spider/mysql-test/spider/r/spider_fixes.result1
-rw-r--r--storage/spider/mysql-test/spider/t/partition_mrr.test1
-rw-r--r--storage/spider/spd_conn.cc41
-rw-r--r--storage/spider/spd_conn.h3
-rw-r--r--storage/spider/spd_copy_tables.cc9
-rw-r--r--storage/spider/spd_db_conn.cc3
-rw-r--r--storage/spider/spd_db_include.h2
-rw-r--r--storage/spider/spd_db_mysql.cc89
-rw-r--r--storage/spider/spd_direct_sql.cc2
-rw-r--r--storage/spider/spd_include.h2
-rw-r--r--storage/spider/spd_init_query.h33
-rw-r--r--storage/spider/spd_ping_table.cc8
-rw-r--r--storage/spider/spd_sys_table.cc44
-rw-r--r--storage/spider/spd_table.cc52
-rw-r--r--storage/spider/spd_trx.cc43
-rw-r--r--storage/spider/spd_trx.h6
-rwxr-xr-xtests/check_costs.pl1023
-rw-r--r--tests/mysql_client_fw.c6
-rw-r--r--tests/prev_record.cc466
1408 files changed, 41614 insertions, 33820 deletions
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 367fa07589c..fbba6e12f12 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -40,10 +40,11 @@ carefully describe how to test manually.
<!--
Tick one of the following boxes [x] to help us understand
if the base branch for the PR is correct
+(Currently the earliest maintained branch is 10.3)
-->
## Basing the PR against the correct MariaDB version
- [ ] *This is a new feature and the PR is based against the latest MariaDB development branch*
-- [ ] *This is a bug fix and the PR is based against the earliest branch in which the bug can be reproduced*
+- [ ] *This is a bug fix and the PR is based against the earliest maintained branch in which the bug can be reproduced*
<!--
You might consider answering some questions like:
@@ -55,3 +56,6 @@ You might consider answering some questions like:
-->
## Backward compatibility
TODO: fill details here, if applicable, or remove the section
+
+## PR quality check
+- [ ] I checked the [CODING_STANDARDS.md](https://github.com/MariaDB/server/blob/11.0/CODING_STANDARDS.md) file and my PR conforms to this where appropriate.
diff --git a/.gitignore b/.gitignore
index 2fb3857120c..f86d7676413 100644
--- a/.gitignore
+++ b/.gitignore
@@ -239,6 +239,7 @@ storage/perfschema/pfs_config.h
storage/rocksdb/ldb
storage/rocksdb/myrocks_hotbackup
storage/rocksdb/mysql_ldb
+storage/rocksdb/mysql_ldb.1
storage/rocksdb/rdb_source_revision.h
storage/rocksdb/sst_dump
strings/conf_to_src
@@ -266,6 +267,7 @@ support-files/mysql.10.0.11.spec
support-files/mysql.server
support-files/mysql.service
support-files/mysql.spec
+support-files/mysql-log-rotate
support-files/mysqld.service
support-files/mysqld_multi.server
support-files/policy/selinux/mysqld-safe.pp
@@ -569,39 +571,38 @@ extra/mariadb-waitpid
libmysqld/examples/mariadb-client-test-embedded
libmysqld/examples/mariadb-embedded
libmysqld/examples/mariadb-test-embedded
-man/mariadb.1
-man/mariadb-access.1
-man/mariadb-admin.1
-man/mariadb-backup.1
-man/mariadb-binlog.1
-man/mariadb-check.1
-man/mariadb-client-test.1
-man/mariadb-client-test-embedded.1
-man/mariadb_config.1
-man/mariadb-convert-table-format.1
-man/mariadbd.8
-man/mariadbd-multi.1
-man/mariadbd-safe.1
-man/mariadbd-safe-helper.1
-man/mariadb-dump.1
-man/mariadb-dumpslow.1
-man/mariadb-embedded.1
-man/mariadb-find-rows.1
-man/mariadb-fix-extensions.1
-man/mariadb-hotcopy.1
-man/mariadb-import.1
-man/mariadb-install-db.1
-man/mariadb-ldb.1
-man/mariadb-plugin.1
-man/mariadb-secure-installation.1
-man/mariadb-setpermission.1
-man/mariadb-show.1
-man/mariadb-slap.1
-man/mariadb-test.1
-man/mariadb-test-embedded.1
-man/mariadb-tzinfo-to-sql.1
-man/mariadb-upgrade.1
-man/mariadb-waitpid.1
+man/mariabackup.1
+man/mysql.1
+man/mysql_client_test.1
+man/mysql_client_test_embedded.1
+man/mysql_config.1
+man/mysql_convert_table_format.1
+man/mysql_embedded.1
+man/mysql_find_rows.1
+man/mysql_fix_extensions.1
+man/mysql_install_db.1
+man/mysql_plugin.1
+man/mysql_secure_installation.1
+man/mysql_setpermission.1
+man/mysql_tzinfo_to_sql.1
+man/mysql_upgrade.1
+man/mysql_waitpid.1
+man/mysqlaccess.1
+man/mysqladmin.1
+man/mysqlbinlog.1
+man/mysqlcheck.1
+man/mysqld.8
+man/mysqld_multi.1
+man/mysqld_safe.1
+man/mysqld_safe_helper.1
+man/mysqldump.1
+man/mysqldumpslow.1
+man/mysqlhotcopy.1
+man/mysqlimport.1
+man/mysqlshow.1
+man/mysqlslap.1
+man/mysqltest.1
+man/mysqltest_embedded.1
scripts/mariadb-access
scripts/mariadb-convert-table-format
scripts/mariadbd-multi
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index d990608f14e..9b3a584bbab 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -127,7 +127,7 @@ get_make_parallel_flag
# implementation of SSL. --with-ssl=yes will first try system library
# then the bundled one --with-ssl=system will use the system library.
# We use bundled by default as this is guaranteed to work with Galera
-SSL_LIBRARY=--with-ssl
+SSL_LIBRARY=--with-ssl=bundled
if [ "x$warning_mode" = "xpedantic" ]; then
warnings="-W -Wall -ansi -pedantic -Wno-long-long -Wno-unused -D_POSIX_SOURCE"
@@ -202,6 +202,7 @@ base_configs="$base_configs --with-extra-charsets=complex "
base_configs="$base_configs --enable-thread-safe-client "
base_configs="$base_configs --with-big-tables $maintainer_mode"
base_configs="$base_configs --with-plugin-aria --with-aria-tmp-tables --with-plugin-s3=STATIC"
+base_configs="$base_configs $SSL_LIBRARY"
if test -d "$path/../cmd-line-utils/readline"
then
@@ -212,10 +213,10 @@ then
fi
max_plugins="--with-plugins=max"
-max_no_embedded_configs="$SSL_LIBRARY $max_plugins"
-max_no_qc_configs="$SSL_LIBRARY $max_plugins --without-query-cache"
-max_configs="$SSL_LIBRARY $max_plugins --with-embedded-server --with-libevent --with-plugin-rocksdb=dynamic --with-plugin-test_sql_discovery=DYNAMIC --with-plugin-file_key_management=DYNAMIC --with-plugin-hashicorp_key_management=DYNAMIC"
-all_configs="$SSL_LIBRARY $max_plugins --with-embedded-server --with-innodb_plugin --with-libevent"
+max_no_embedded_configs="$max_plugins"
+max_no_qc_configs="$max_plugins --without-query-cache"
+max_configs="$max_plugins --with-embedded-server --with-libevent --with-plugin-rocksdb=dynamic --with-plugin-test_sql_discovery=DYNAMIC --with-plugin-file_key_management=DYNAMIC --with-plugin-hashicorp_key_management=DYNAMIC --with-plugin-auth_gssapi=DYNAMIC"
+all_configs="$max_plugins --with-embedded-server --with-innodb_plugin --with-libevent"
#
# CPU and platform specific compilation flags.
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e76b976a23c..e9eca293603 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -27,7 +27,7 @@ IF(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
"None" "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
ENDIF()
-PROJECT(MySQL)
+PROJECT(MariaDB)
# Remove the following comment if you don't want to have striped binaries
# in RPM's:
@@ -88,6 +88,7 @@ ELSE()
SET(CMAKE_CXX_STANDARD 11)
ENDIF()
+# Lower case package names from PROJECT are used if not explicitly upper case.
SET(CPACK_PACKAGE_NAME "MariaDB")
SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "MariaDB: a very fast and robust SQL database server")
SET(CPACK_PACKAGE_URL "http://mariadb.org")
diff --git a/CODING_STANDARDS.md b/CODING_STANDARDS.md
new file mode 100644
index 00000000000..a35ce57d45c
--- /dev/null
+++ b/CODING_STANDARDS.md
@@ -0,0 +1,301 @@
+# Coding Standards
+
+This is a working document outlining the coding standard for the general MariaDB codebase.
+The document can be found in the 11.0 and newer trees in the root directory as "CODING_STANDARDS.md"
+
+It does not cover the coding standards for individual plugins, these should have their own coding standards documentation.
+
+## Using Git with the MariaDB codebase
+
+### Git commit messages
+
+Git commit messages must conform to the 50/72 rule.
+This is a de facto git standard which is automatically enforced by some editors.
+This means:
+
+* 50 characters max for the first (description) line (see exception later)
+* A blank line.
+* 72 characters max for every subsequent line.
+
+In addition if there is a Jira ticket number, this should be the first thing in the description.
+As an example:
+
+```
+MDEV-12345 Fixing Rockwell Turbo Encabulator
+
+The new principle involved is that instead of power being generated by
+the relative motion of conductors and fluxes, it’s produced by the
+modial interaction of magneto-reluctance and capacitive diractance.
+```
+
+The only explicitly allowed exception to the 50/72 rule is that the first line can be 'MDEV-###### title', even if the title would make the line longer than 50 characters.
+
+The commit messages are typically rendered in [Markdown format](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax), so markdown formatting is permitted for the message body.
+
+### Branch handling
+
+When updating your code, please make sure you perform a rebase, not a merge with the latest branch.
+Pull requests should be a simple fast-forward of the branch they are intended to land on.
+
+The correct way to rebase (if working on top of 10.3 branch):
+
+```sh
+git fetch upstream        # This assumes upstream is github.com/MariaDB/server
+git rebase upstream/10.3
+git push --force origin my_branch
+```
+
+## Coding Style (C / C++ files)
+
+Everyone has a preferred coding style, there is no real correct style for all projects around the world.
+What is important is that we stick to one common style throughout this code base.
+
+### Indentation
+
+We should use a variant of the [Allman indentation style](https://en.wikipedia.org/wiki/Indentation_style#Allman_style).
+The variation is to use two spaces instead of tabs and has a couple of minor rule changes as below.
+
+Allman style specifies that the braces associated with a statement go on the following line at the same indentation as the statement, and the statements inside the braces are indented one level further.
+The closing brace is also on a new line at the same indentation as the original statement.
+
+For example:
+
+```cpp
+while (x == y)
+{
+ something();
+ somethingelse();
+}
+finalthing();
+```
+
+#### Switch / Case statements
+
+For switch / case statements the `case` labels need to be aligned with the `switch`.
+Preferably `switch (expr)` should be followed by '{' on the same line so
+that the 'case:' labels line up nicely.
+
+For example:
+
+```cpp
+switch(level) {
+case ERROR:
+ sql_print_error("Error: %s", message.c_ptr_safe());
+ break;
+case WARNING:
+ sql_print_warning("Warning: %s", message.c_ptr_safe());
+ break;
+...
+}
+```
+
+#### If statements
+
+If the `if` statement only executes one line of code it is possible to write the statement without the braces such as this:
+
+```cpp
+if (opt_console)
+ opt_error_log= 0;
+```
+
+Prefer reducing indent level with the use of early return statements (or in special circumstances goto).
+Rather than:
+
+```cpp
+if (condition)
+{
+ <logic>
+}
+return error_code;
+```
+
+Use:
+
+```cpp
+if (!condition)
+ return error_code;
+<logic>
+return success;
+```
+
+### File names
+
+File names should be lower case with underscore word separators.
+C file names use the `.c` extension, C++ files use the `.cc` extension and header files use the `.h` extension.
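+For example, existing source files follow this pattern: `sql_class.h`, `sql_select.cc` and `my_getopt.c`.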
+
+### Language standards
+
+For pure-C files we use C99 and for C++ we use C++11.
+The code need to be able to compile on multiple platforms using different compilers (for example: Windows / Linux, x86_64 / ARM).
+
+### Line lengths
+
+Lines should be no more than 80 characters.
+The reason for this is that it makes it easier to have multiple editor
+windows open side by side and still keep code readable without line
+wraps.
+
+When breaking long lines:
+- use '()' to group expressions so that the editor can automatically help
+ you with the indentation.
+- When breaking expressions, leave the operator (+,- etc) last on the previous
+ line.
+
+```cpp
+rows= tab->table->file->multi_range_read_info(tab->ref.key, 10, 20, tab->ref.key_parts, &bufsz, &flags, &cost);
+->
+rows= tab->table->file->multi_range_read_info(tab->ref.key, 10, 20,
+ tab->ref.key_parts, &bufsz,
+ &flags, &cost);
+```
+
+```cpp
+tmp= aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb;
+->
+tmp= (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb);
+```
+
+### Comments
+
+Single-line / inline code comments can use the double-slash (`//`) style, whereas multi-line code comments should start with `/*` and end with `*/`, with the text indented by 2 spaces, for example:
+
+```cpp
+/*
+ This is a multi-line code comment.
+ It has an indentation of two spaces.
+*/
+```
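+
+A single-line comment, for illustration:
+
+```cpp
+length= 0;  // Reset the length before refilling the buffer
+```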
+
+### Variables, classes, and functions
+
+Variables and functions should be descriptive and in "snake case", for example:
+
+```cpp
+void my_function(uint16 variable_name)
+{
+```
+
+Class names should also be "snake case" but should start with an upper-case character.
+Such as this:
+
+```cpp
+class Buffered_logs
+{
+```
+
+Assignments should have no space on the left side of the equals sign and one space on the right-hand side. For example:
+
+```cpp
+a= 1; // Correct
+a = 1; // Incorrect for the server code,
+ // ok for Storage Engines if they use it (aka Connect)
+```
+
+The above makes it easy to use 'grep' to find all assignments to a variable.
+
+Please do not write conditions like this:
+
+```cpp
+if (0 == *error_code)
+```
+
+Please do this instead:
+
+```cpp
+if (*error_code == 0)
+// Or even better
+if (!*error_code)
+```
+
+Only use one-character variables (i,j,k...) in short loops. For anything else
+use descriptive names!
+
+### Variable declarations
+
+Variables should be declared at the start of their context (the start of the function, or the start of the block inside an 'if' statement), as in the sketch after the list below.
+
+The benefits of this:
+- Code lines get shorter.
+- It is easier to see the stack space used by a function.
+- It is easier to find the declaration of the variable.
+- If one has to add an 'if (error) goto end' construct, one can do
+ that without having to move variable declarations around.
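+
+A minimal sketch of this convention (the names are only for illustration):
+
+```cpp
+int my_function(THD *thd, bool use_retry)
+{
+  int error= 0;                  // Declared at the start of the function
+  uint32 found_rows= 0;
+
+  if (use_retry)
+  {
+    uint32 retry_count= 0;       // Declared at the start of the 'if' block
+    ...
+  }
+  ...
+  return error;
+}
+```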
+
+
+### Constant integers
+
+Constant integers that are used for things such as buffer sizes should be given a named definition rather than used directly as literals.
+This is because the value could change later and some uses of the literal could then be missed.
+For example:
+
+```cpp
+char *buffer= my_malloc(PSI_INSTRUMENT_ME, 1024, MYF(MY_WME));
+
+snprintf(buffer, 1024, "%d: %s", integer, text);
+```
+
+Could become:
+
+```cpp
+constexpr int buffer_size= 1024;
+char *buffer= my_malloc(PSI_INSTRUMENT_ME, buffer_size, MYF(MY_WME));
+
+snprintf(buffer, buffer_size, "%d: %s", integer, text);
+```
+
+Alternatively the integer can be defined using an `enum` or `#define`.
+
+### Spacing
+
+#### Whitespace
+
+* Lines should not have any trailing whitespace.
+* There should not be any trailing blank lines at the end of a file.
+* Line endings are POSIX style (`\n`).
+* Two spaces for each indentation level, not tabs.
+
+#### Pointers
+
+The `*` of a pointer should be on the side of the variable name such as:
+
+```cpp
+void my_function(THD *thd)
+{
+```
+
+As yet there is no standard as to whether the `*` in a cast should be followed by a space or not.
+Both of these are valid:
+
+```cpp
+name= (const char*)db_name;
+name= (const char *) db_name;
+```
+
+#### Function variables
+
+There should be a space after each comma in a definition and usage of a function.
+For example:
+
+```cpp
+my_function(thd, db_name);
+```
+
+### Types
+
+In general the use of types such as `char`, `int` and `long` is discouraged: they can be different sizes across platforms, and `char` can be either unsigned or signed depending on the platform, so they are not portable.
+Shortened versions of the signed and unsigned variants are available in `my_global.h`.
+These should be used instead, as appropriate:
+
+* 8-bit signed / unsigned int -> `int8` / `uint8`
+* 16-bit signed / unsigned int -> `int16` / `uint16`
+* 32-bit signed / unsigned int -> `int32` / `uint32`
+* 64-bit signed / unsigned int -> `int64` / `uint64`
+* Integer file descriptor -> `File`
+* Integer socket descriptor -> `my_socket`
+
+`size_t` and `ptrdiff_t` are used in the source where appropriate, buffer sizes for example.
+It should be noted that these are implementation dependent but are useful when used in the correct context.
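+
+As an illustration only (the names are hypothetical):
+
+```cpp
+uint32    row_count= 0;         // 32-bit unsigned integer
+int64     delta= -1;            // 64-bit signed integer
+size_t    buffer_length= 1024;  // Buffer size
+File      data_file;            // Integer file descriptor
+my_socket client_socket;        // Integer socket descriptor
+```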
+
+Further types can be found in the `include/` directory files.
+There are also general utility functions in `mysys`.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 37899f37958..c6744bee199 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -22,6 +22,7 @@ MariaDB Server has a vibrant community contributing in a wide range of areas. Th
- Write code to fix bugs or develop new features (see [Getting Started for Developers](https://mariadb.org/getting-started-for-developers)).See also [list of beginner friendly tasks](https://jira.mariadb.org/browse/MDEV-15736?jql=resolution%20%3D%20Unresolved%20AND%20labels%20%3D%20beginner-friendly%20ORDER%20BY%20updated%20DESC)
- Help with code quality control
- Participate in packaging for different Linux distributions
+- Coding standards for the main source code can be found in [CODING_STANDARDS.md](CODING_STANDARDS.md).
### Sponsor or donate
---
diff --git a/Docs/optimizer_costs.txt b/Docs/optimizer_costs.txt
new file mode 100644
index 00000000000..dcb8bca7a23
--- /dev/null
+++ b/Docs/optimizer_costs.txt
@@ -0,0 +1,1309 @@
+This file is intended to explain some of the optimizer cost variables
+in MariaDB 11.0
+
+Background
+==========
+
+Most timings have come from running:
+
+./check_costs.pl --rows=1000000 --socket=/tmp/mysql-dbug.sock --comment="--aria-pagecache-buffer-size=10G --innodb-buffer_pool_size=10G --key_buffer-size=1G --max-heap-table-size=10G"
+
+The MariaDB server is started with the options:
+--aria-pagecache-buffer-size=10G --innodb-buffer_pool_size=10G --key_buffer-size=1G --max-heap-table-size=10G
+
+- All costs are changed to be milliseconds for engine operations and
+ other calculations, like the WHERE clause. This is a big change from
+ before the patch that added this file where the basic cost was a
+ disk seek and one index read and we assumed they had the same cost.
+- I am using Aria as the 'base' cost. This is because it caches all data,
+ which most other engines also would do.
+- MyISAM cannot be used as 'base' as it does not cache row data (which gives
+ a high overhead when doing row lookups).
+- Heap is in memory and a bit too special (no caching).
+- InnoDB is a clustered engine where secondary indexes have to use
+ the clustered index to find a row (not a common case among storage engines).
+
+The old assumption in the optimizer has 'always' been that
+1 cost = 1 seek = 1 index = 1 row lookup = 0.10ms.
+However 1 seek != 1 index or row lookup, and this has not been reflected in
+most other costs.
+This document is the base of changing things so that 1 cost = 1ms.
+
+
+Setup
+=====
+
+All timings are calculated based on result from this computer:
+CPU: Intel(R) Xeon(R) W-2295 CPU @ 3.00GHz
+Memory: 256G
+Disk: Samsung SSD 860 (not really relevant in this case)
+Rows in tests: 1M. Each test is run 3 times
+(one run to cache the data and 2 runs of which we take the average).
+
+The assumption is that other computers will have somewhat proportional
+timings. The timings are done with all data in memory (except MyISAM rows).
+This is reflected in the costs for the test by setting
+optimizer_disk_read_ratio=0.
+
+Note that even on a single Linux computer without any notable tasks
+the run time varies a bit from run to run (up to 4%), so the numbers in
+this document cannot be repeated exactly but should be good enough for
+the optimizer.
+
+Timings for disk accesses on other systems can be changed by setting
+optimizer_disk_read_cost (usec / 4096 bytes) to match the read speed.
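+
+For example, assuming the variable can be set at runtime like other
+system variables (check the server documentation for the exact scope
+and unit), something like the following could be used:
+
+SET GLOBAL optimizer_disk_read_cost= 10.240;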
+
+Default values for check_costs.pl:
+optimizer_disk_read_ratio= 0 Everything is cached
+SCAN_LOOKUP_COST=1 Cost modifier for scan (for end user)
+set @@optimizer_switch='index_condition_pushdown=off';
+
+
+ROW_COPY_COST and KEY_COPY_COST
+===============================
+
+Regarding ROW_COPY_COST:
+When calculating the cost of fetching a row, we have two alternative cost
+parts (in addition to other costs):
+scanning: rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST)
+rnd_pos: rows * (ROW_LOOKUP_COST + ROW_COPY_COST)
+
+In theory we could remove ROW_COPY_COST and just move the cost
+to the two other variables. However, in the future there may be reason
+to be able to modify row_copy_cost per table depending on the number and type
+of fields (a table of 1000 fields should have a higher row copy cost than
+a table with 1 field). Because of this, I prefer to keep ROW_COPY_COST
+around for now.
+
+Regarding KEY_COPY_COST:
+When calculating the cost of fetching a key we have as part of the cost:
+keyread_time: rows * KEY_COPY_COST + ranges * KEY_LOOKUP_COST +
+ (rows-ranges) * KEY_NEXT_FIND_COST
+key_scan_time: rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+
+We could remove KEY_COPY_COST by adding it to KEY_LOOKUP_COST and
+KEY_NEXT_FIND_COST but I prefer to keep it with the same argument as
+for ROW_COPY_COST.
+
+The ratio KEY_COPY_COST / (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+is assumed to be 0.1577 (see the analysis in the appendix).
+
+There is a relationship between the above costs in that for a clustered
+index the cost is calculated as ha_keyread_time() + ROW_COPY_COST.
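+
+As a worked illustration of keyread_time, using the Aria values that are
+derived later in this document (KEY_COPY_COST= 0.000015685,
+KEY_LOOKUP_COST= 0.000435777, KEY_NEXT_FIND_COST= 0.000082347),
+reading 1000 keys in a single range would cost roughly:
+
+keyread_time= 1000 * 0.000015685 + 1 * 0.000435777 + 999 * 0.000082347
+            = 0.015685 + 0.000436 + 0.082265 = 0.098386 ms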
+
+
+Preamble
+=========
+
+I first tried to use the performance schema to get costs, but I was not
+successful as all timings I got for tables showed the total time of
+executing the statement, not the time spent doing the actual reads.
+Also, the overhead of the performance schema affected the results.
+
+With --performance-schema=on
+
+MariaDB [test]> select sum(1) from seq_1_to_100000000;
++-----------+
+| sum(1) |
++-----------+
+| 100000000 |
++-----------+
+1 row in set (4.950 sec)
+
+Performance schema overhead: 30.1%
+
+With:
+UPDATE performance_schema.setup_consumers SET ENABLED = 'YES';
+UPDATE performance_schema.setup_instruments SET ENABLED = 'YES', TIMED = 'YES';
+
+Flush with:
+CALL sys.ps_truncate_all_tables(FALSE);
+
+Performance schema overhead now: 32.9%
+
+Timings from:
+select * from events_statements_current where thread_id=80;
+
+MariaDB [test]> select 885402302809000-884884140290000;
++---------------------------------+
+| 885402302809000-884884140290000 |
++---------------------------------+
+| 518162519000 |
++---------------------------------+
+-> Need to divide by 1000000000000.0 to get seconds
+
+As seen above, this gives the total statement time, not the time
+spent accessing the tables.
+
+In the end, I decided to use ANALYZE to find out the cost of the table
+actions:
+
+For example: Finding out table scan timing (and thus costs):
+
+analyze format=json select sum(1) from seq_1_to_100000000;
+r_table_time_ms": 1189.239022
+
+
+Calculating 'optimizer_where_cost'
+==================================
+
+To make the WHERE cost reasonable (not too low) we assume there are
+2 simple conditions in the default 'WHERE clause'.
+
+MariaDB [test]> select benchmark(100000000,l_commitDate >= '2000-01-01' and l_tax >= 0.0) from test.check_costs limit 1;
++--------------------------------------------------------------------+
+| benchmark(100000000,l_commitDate >= '2000-01-01' and l_tax >= 0.0) |
++--------------------------------------------------------------------+
+| 0 |
++--------------------------------------------------------------------+
+1 row in set (3.198 sec)
+
+Time of where in seconds: 3.198 / 100000000 (100,000,000)
+
+Verification:
+
+select sum(1) from seq_1_to_100000000 where seq>=0.0 and seq>=-1.0;
++-----------+
+| sum(1) |
++-----------+
+| 100000000 |
++-----------+
+1 row in set (8.564 sec)
+
+MariaDB [test]> select sum(1) from seq_1_to_100000000;
++-----------+
+| sum(1) |
++-----------+
+| 100000000 |
++-----------+
+1 row in set (5.162 sec)
+
+Time of where= (8.564-5.162)/100000000 = 3.402/100000000 (100,000,000)
+(Result is close enough, as the computations are slightly different)
+
+check_costs.pl provides these numbers when using heap tables and 1M rows:
+
+simple where: 118.689 ms
+complex where: 138.474 ms
+no where: 83.699 ms
+
+Which gives for simple where:
+(118.689-83.699)/1000 = 0.034990000000000007 ms
+Which is in the same ballpark.
+
+We use the result from the select benchmark run as this has the least overhead
+and is easiest to repeat and verify in a test.
+Which gives:
+optimizer_where_cost= 0.032 ms / WHERE.
+
+
+HEAP TABLE SCAN & ROW_COPY_COST
+===============================
+
+We start with heap as all rows are in memory and we don't have to take
+disk reads into account.
+
+select sum(l_partkey) from test.check_costs
+table_scan ms: 10.02078736
+rows: 1000000
+
+Cost should be 10.02078736 (scan cost) + 32 (where cost)
+
+cost= scan_time() * optimizer_cache_cost * SCAN_LOOKUP_COST +
+ TABLE_SCAN_SETUP_COST +
+ records * (ROW_COPY_COST + ROW_LOOKUP_COST + WHERE_COMPARE_COST);
+
+=>
+We are ignoring TABLE_SCAN_SETUP_COST (which exists just to prefer index lookups on small
+tables).
+We can also ignore records * WHERE_COMPARE_COST as we don't have that
+in the above calculated 'ms'.
+row_costs= (ROW_COPY_COST + ROW_LOOKUP_COST)
+
+cost= scan_time() * 1 * 1 +
+ 1000000.0 * (row_costs)
+=>
+cost= time_per_row*1000000 + row_costs * 1000000;
+=>
+time_per_row+row_cost= cost/1000000
+
+Let's assume that for heap, finding the next row is 80% of the time and
+copying the row (a memcpy) to the upper level is the remaining 20%.
+(This is not really important, we could put everything in heap_scan_time,
+but it's good to have the data split as it gives us more options to
+experiment with later).
+
+row_lookup_cost= 10.02078736/1000000*0.8 = 8.0166298880000005e-06
+row_copy_cost= 10.02078736/1000000*0.2 = 2.0041574720000001e-06
+
+Conclusion:
+heap_scan_time= 8.0166e-06
+row_copy_cost= 2.0042e-06
+
+Heap doesn't support key only read, so key_copy_cost is not relevant for it.
+
+
+HEAP INDEX SCAN
+===============
+
+select count(*) from test.check_costs_heap force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0
+index_scan time: 79.7286117 ms
+
+Index scan on heap tables can only happen with binary trees.
+l_supp_key is using a binary tree.
+
+cost= (ranges + rows + 1) * BTREE_KEY_NEXT_FIND_COST + rows * row_copy_cost=
+(for large number of rows):
+rows * (BTREE_KEY_NEXT_FIND_COST + row_copy_cost)
+
+BTREE_KEY_NEXT_FIND_COST= cost/rows - row_copy_cost =
+79.7286117/1000000- 2.334e-06= 0.0000773946117
+
+
+HEAP EQ_REF
+===========
+
+select straight_join count(*) from seq_1_to_1000000,test.check_costs_heap where seq=l_linenumber
+eq_ref_index_join time: 175.874165 of which 12.57 is from seq_1_to_1000000
+
+Note: This is 34% of the cost of an Aria table with index lookup and
+ 20% of an Aria table with full key+row lookup.
+
+cost= rows * (key_lookup_cost + row_copy_cost)
+key_lookup_cost= cost/rows - row_copy_cost =
+(175.874165-12.57)/1000000 - 2.334e-06 = 0.00016097016500000002
+
+
+HEAP EQ_REF on binary tree index
+================================
+
+select straight_join count(*) from seq_1_to_1000000,test.check_costs_heap where seq=l_extra and l_partkey >= 0
+eq_ref_join time: 241.350539 ms of which 12.57 is from seq_1_to_1000000
+
+rows * (tree_find_cost() + row_copy_cost) =
+
+tree_find_cost()= cost/rows - row_copy_cost =
+
+(241.350539-12.57)/1000000 - 2.334e-06= 0.000226446539
+
+tree_find_cost() is defined as key_compare_cost * log2(table_rows)
+->
+key_compare_cost= 0.000226446539/log2(1000000) = 0.000011361200108882259;
+
+
+SEQUENCE SCAN
+=============
+
+analyze format=json select sum(seq+1) from seq_1_to_1000000;
+r_table_time_ms: 12.47830611
+
+Note that for sequence, an index scan and a table scan are the same thing.
+We need to have a row_copy/key_copy cost as this is used when doing
+a key lookup for sequence. Setting these to 50% of the full cost
+should be sufficient for now.
+
+Calculation sequence_scan_cost:
+
+When ignoring reading from disk, the cost of a table scan is:
+rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST)
+
+The cost of key scan is:
+ranges * KEY_LOOKUP_COST + (rows - ranges) * KEY_NEXT_FIND_COST +
+rows * KEY_COPY_COST;
+
+As there is no search after first key for sequence, we can set
+KEY_LOOKUP_COST = KEY_NEXT_FIND_COST.
+
+This gives us:
+
+r_table_time_ms = (ROW_NEXT_FIND_COST + ROW_COPY_COST) * 1000000 =
+                  (KEY_NEXT_FIND_COST + KEY_COPY_COST) * 1000000;
+
+->
+ROW_NEXT_FIND_COST= ROW_COPY_COST= KEY_NEXT_FIND_COST= KEY_COPY_COST=
+12.47830611/1000000/2 = 0.0000062391530550
+
+
+HEAP KEY LOOKUP
+===============
+
+We can use this code to find the timings of a index read in a table:
+
+analyze format=json select straight_join count(*) from seq_1_to_1000000,check_costs where seq=l_orderkey
+
+"query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": 420.5083447,
+ "table": {
+ "table_name": "seq_1_to_1000000",
+ "access_type": "index",
+ "possible_keys": ["PRIMARY"],
+ "key": "PRIMARY",
+ "key_length": "8",
+ "used_key_parts": ["seq"],
+ "r_loops": 1,
+ "rows": 1000000,
+ "r_rows": 1000000,
+ "r_table_time_ms": 12.47830611,
+ "r_other_time_ms": 44.0671283,
+ "filtered": 100,
+ "r_filtered": 100,
+ "using_index": true
+ },
+ "table": {
+ "table_name": "check_costs",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["test.seq_1_to_1000000.seq"],
+ "r_loops": 1000000,
+ "rows": 1,
+ "r_rows": 1,
+ "r_table_time_ms": 160
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "seq_1_to_1000000.seq = check_costs.l_orderkey"
+ }
+ }
+
+This gives the time for a key lookup on hash key as:
+160/10000000 - row_copy_cost =
+160/1000000.0 - 2.0042e-06 = 0.00015799580000000002
+
+
+ARIA TABLE SCAN
+===============
+(page format, all rows are cached)
+
+table_scan ms: 107.315698
+
+Cost is calculated as:
+
+blocks= (stats.data_file_length / stats.block_size) = 122888192/4096= 30002
+engine_blocks (8192 is block size in Aria) = 15001
+
+cost= blocks * avg_io_cost() *
+ optimizer_cache_cost * SCAN_LOOKUP_COST +
+ engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ records * (ROW_NEXT_FIND_COST + ROW_COPY_COST));
+
+When all is in memory (optimizer_cache_cost= 0) we get:
+
+cost= blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ records * (ROW_NEXT_FIND_COST + ROW_COPY_COST));
+
+To calculate INDEX_BLOCK_COPY_COST I added a temporary tracker in
+ma_pagecache.cc::pagecache_read() and ran the same query.
+I got the following data:
+{counter = 17755, sum = 1890559}
+Which gives me the time for copying a block as:
+1000.0*1890559/sys_timer_info.cycles.frequency/17755 = 3.558138826971332e-05 ms
+And thus INDEX_BLOCK_COPY_COST= 0.035600
+
+Replacing known constants (and ignoring TABLE_SCAN_SETUP_COST):
+cost= 107.315698 = 15001 * 3.56e-5 + 1000000 * aria_row_copy_costs;
+
+aria_row_copy_costs= (107.315698 - (15001 * 3.56e-5))/1000000 =
+0.0001067816624
+
+As ROW_COPY_COST/(ROW_COPY_COST + ROW_NEXT_FIND_COST)= 0.57 (see appendix)
+
+ROW_COPY_COST= 0.0001067816624 * 0.57 = 0.000060865547560
+ROW_NEXT_FIND_COST= 0.0001067816624 * 0.43 = 0.000045916114832
+
+
+Aria, INDEX SCAN
+================
+
+Finding out cost of reading X keys from an index (no row lookup) in Aria.
+
+Query: select count(*) from test.check_costs_aria force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0
+Table access time: ms: 98.1427158
+
+blocks= index_size/IO_SIZE =
+(rows * tot_key_length / INDEX_BLOCK_FILL_FACTOR) / IO_SIZE
+->
+1000000 * 19 / 0.75/ 4096 = 6184
+engine_blocks (block_size 8192) = 6184/2 = 3092
+(The range optimizer had calculated 3085)
+
+keyread_time= blocks * avg_io_cost() * cache + engine_blocks * INDEX_BLOCK_COPY_COST + rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST);
+= engine_blocks * INDEX_BLOCK_COPY_COST + rows * KEY_NEXT_FIND_COST=
+ 3092 * 3.56e-05 + 1000000 * (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+->
+KEY_NEXT_FIND_COST + KEY_COPY_COST= (98.1427158 - 3092 * 3.56e-05)/1000000 =
+0.0000980326406;
+
+KEY_COPY_COST= 0.0000980326406 * 0.16 = 0.000015685222496
+KEY_NEXT_FIND_COST= 0.0000980326406 * 0.84 = 0.000082347418104
+
+
+Aria, RANGE SCAN (scan index, fetch a row for each index entry)
+===============================================================
+
+Query:
+select sum(l_orderkey) from test.check_costs_aria force index(l_suppkey) where l_suppkey >= 0 and l_partkey >=0
+range_scan ms: 309.7620909
+
+cost= keyread_time + rnd_pos_time.
+keyread_time is as above in index scan, but without KEY_COPY_COST:
+keyread_time= 98.1427158 - KEY_COPY_COST * 1000000=
+98.1427158 - 0.000015685222496 * 1000000= 82.457493304000000;
+rnd_pos_time= 309.7620909 - 82.457493304000000 = 227.304597596000000
+
+rnd_pos_time() = io_cost + engine_mem_cost +
+ rows * (ROW_LOOKUP_COST + ROW_COPY_COST) =
+rows * avg_io_cost() * engine_block_size/IO_SIZE +
+rows * INDEX_BLOCK_COPY_COST +
+rows * (ROW_COPY_COST + ROW_LOOKUP_COST)
+= (When rows are in memory)
+rows * INDEX_BLOCK_COPY_COST +
+rows * (ROW_COPY_COST + ROW_LOOKUP_COST)
+
+This gives us:
+227.304597596000000 = 1000000 * 3.56e-05 + 1000000*(0.000060865547560 + ROW_LOOKUP_COST)
+->
+ROW_LOOKUP_COST= (227.304597596000000 - 1000000 * 3.56e-05 - 1000000*0.000060865547560) / 1000000 = 0.0001308390500
+
+
+Aria, EQ_REF with index_read
+============================
+
+select straight_join count(*) from seq_1_to_1000000,test.check_costs_aria where seq=l_linenumber
+eq_ref_index_join 499.631749 ms
+
+According to analyze statement:
+
+- Cost for SELECT * from seq_1_to_1000000: 12.57
+  (From Last_query_cost after the above costs have been applied)
+- Time from check_costs: eq_ref's: 499.631749 - 12.57 = 487.061749
+
+cost= rows * (keyread_time(1,1) + KEY_COPY_COST)
+
+keyread_time(1,1)= INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST;
+
+cost= rows * (KEY_COPY_COST + INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST)
+->
+KEY_LOOKUP_COST= cost/rows - 0.000015685222496 - 0.000035600
+KEY_LOOKUP_COST= 487.061749 / 1000000 - 0.000035600 - 0.000015685222496
+KEY_LOOKUP_COST= 0.000435776526504
+
+
+MyISAM, TABLE SCAN
+==================
+
+select sum(l_partkey) from test.check_costs_myisam
+table_scan ms: 126.353364
+
+check_costs.MYD: 109199788 = 26660 IO_SIZE blocks
+The row format for MyISAM is similar to Aria's, so we use the same
+ROW_COPY_COST as for Aria.
+
+cost= blocks * avg_io_cost() *
+ optimizer_cache_cost * SCAN_LOOKUP_COST +
+ engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST));
+
+MyISAM is using the file system as a row cache.
+Let's put the cost of accessing the row in ROW_NEXT_FIND_COST.
+Everything is cached (by the file system) and optimizer_cache_cost= 0;
+
+cost= engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST))
+
+ROW_NEXT_FIND_COST=
+(costs - engine_blocks * INDEX_BLOCK_COPY_COST - TABLE_SCAN_SETUP_COST)/rows -
+ROW_COPY_COST
+=
+(126.353364 - 26660 * 3.56e-05 - 1)/1000000 - 0.000060865547560
+ROW_NEXT_FIND_COST= 0.00006353872044
+
+
+MyISAM INDEX SCAN
+=================
+
+select count(*) from test.check_costs_myisam force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0;
+index_scan ms: 106.490584
+
+blocks= index_size/IO_SIZE =
+(rows * tot_key_length / INDEX_BLOCK_FILL_FACTOR) / IO_SIZE
+->
+1000000 * 19 / 0.75/ 4096 = 6184
+As MyISAM has a block size of 4096 for this table, engine_blocks= 6184
+
+cost= keyread_time= blocks * avg_io_cost() * cache + engine_blocks * INDEX_BLOCK_COPY_COST + rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST);
+->
+cost= engine_blocks * INDEX_BLOCK_COPY_COST + rows * KEY_NEXT_FIND_COST
+
+Assuming INDEX_BLOCK_COPY_COST is same as in Aria and the code for
+key_copy is identical to Aria:
+cost= 6184 * 3.56e-05 + 1000000 * (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+->
+KEY_NEXT_FIND_COST= (106.490584 - 6184 * 3.56e-05)/1000000 - 0.000015685222496=
+0.000090585211104
+
+
+MyISAM, RANGE SCAN (scan index, fetch a row for each index entry)
+=================================================================
+
+select sum(l_orderkey) from test.check_costs_myisam force index(l_suppkey) where l_suppkey >= 0 and l_partkey >=0 and l_discount>=0.0
+time: 1202.0894 ms
+
+cost= keyread_time + rnd_pos_time.
+keyread_time is as above in MyISAM INDEX SCAN, but without KEY_COPY_COST:
+keyread_time= 106.490584 - KEY_COPY_COST * 1000000=
+106.490584 - 0.000015685222496 * 1000000= 90.805361504000000;
+rnd_pos_time= 1202.0894 - 90.805361504000000 = 1111.284038496000000
+
+rnd_pos_time() = io_cost + engine_mem_cost +
+ rows * (ROW_LOOKUP_COST + ROW_COPY_COST) =
+rows * avg_io_cost() * engine_block_size/IO_SIZE +
+rows * INDEX_BLOCK_COPY_COST +
+rows * (ROW_COPY_COST + ROW_LOOKUP_COST)
+= (When rows are in memory)
+rows * INDEX_BLOCK_COPY_COST +
+rows * (ROW_COPY_COST + ROW_LOOKUP_COST)
+
+This gives us:
+ 1111.284038496000000 = 1000000 * 3.56e-05 + 1000000*(0.000060865547560 + ROW_LOOKUP_COST)
+->
+ROW_LOOKUP_COST= (1111.284038496000000 - 1000000 * (3.56e-05 + 0.000060865547560)) / 1000000
+->
+ROW_LOOKUP_COST= 0.001014818490936
+
+As the row is never cached, we have to ensure that rnd_pos_time()
+doesn't include an io cost (which would be affected by
+optimizer_cache_hit_ratio). This is done by having a special
+ha_myisam::rnd_pos_time() that doesn't include the io cost but instead adds an
+extra cpu cost.
+
+
+MyISAM, EQ_REF with index_read
+==============================
+
+select straight_join count(*) from seq_1_to_1000000,test.check_costs_myisam where seq=l_linenumber;
+eq_ref_join ms: 613.906777 of which 12.48 ms is for seq_1_to_1000000;
+
+According to analyze statement:
+
+- Cost for SELECT * from seq_1_to_1000000: 12.48 (See sequence_scan_cost)
+- Time from check_costs: eq_ref's: 613.906777- 12.48 = 601.426777;
+
+cost= rows * (keyread_time(1) + KEY_COPY_COST)
+
+keyread_time(1)= INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST;
+
+cost= rows * (KEY_COPY_COST + INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST)
+->
+KEY_LOOKUP_COST= cost/rows - INDEX_BLOCK_COPY_COST - KEY_COPY_COST;
+601.426777 / 1000000 - 3.56e-05 - 0.000015685222496 = 0.00055014155451
+KEY_LOOKUP_COST= 0.00055014155451
+
+
+
+InnoDB, TABLE SCAN
+==================
+
+select sum(l_quantity) from check_costs_innodb;
+table_scan 131.302492
+Note that InnoDB reported only 956356 rows instead of 1000000 in stats.records.
+This will cause the optimizer to calculate the costs based on wrong
+assumptions.
+
+As InnoDB has a clustered index (whose cost is a combination of
+KEY_LOOKUP_COST + ROW_COPY_COST), we have to ensure that the
+relationship between KEY_COPY_COST and ROW_COPY_COST is close to the
+real time of copying a key and a row.
+
+I assume, for now, that the row format for InnoDB is not that
+different from Aria's (in other words, the computation to unpack a row
+is about the same), so let's use the same ROW_COPY_COST (0.000060865547560).
+
+I am ignoring the fact that InnoDB can optimize row copying by only
+copying the used fields, as the optimizer currently has no way to take
+that into account. (This would require a way to update ROW_COPY_COST per
+table instance in the query.)
+
+For now, let's also use the same value as Aria for
+INDEX_BLOCK_COPY_COST (3.56e-05).
+
+The number of IO_SIZE blocks in the InnoDB data file is 34728 (from gdb).
+(For reference, MyISAM was using 26660 and Aria 30002 blocks)
+As InnoDB is using 16K blocks, the number of engine blocks= 34728/4= 8682
+
+cost= blocks * avg_io_cost() *
+ optimizer_cache_cost * SCAN_LOOKUP_COST +
+ engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST));
+
+as optimizer_cache_cost = 0
+
+cost= engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST))
+
+ROW_NEXT_FIND_COST=
+(costs - engine_blocks * INDEX_BLOCK_COPY_COST - TABLE_SCAN_SETUP_COST)/rows -
+ROW_COPY_COST
+= (Ignoring TABLE_SCAN_SETUP_COST, which is just 10 usec)
+(131.302492 - 8682 * 3.56e-05)/1000000 - 0.000060865547560 =
+0.00007012786523999997
+
+
+InnoDB INDEX SCAN
+=================
+
+select count(*) from check_costs_innodb force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0;
+index_scan 114.733037 ms
+Note that InnoDB is reporting 988768 rows instead of 1000000
+(The number varies a bit between runs. At another run I got 956356 rows)
+With default costs (as of above), we get a query cost of 112.142. This can
+still be improved a bit...
+
+blocks= index_size/IO_SIZE =
+(rows * tot_key_length / INDEX_BLOCK_FILL_FACTOR) / IO_SIZE
+-> (total_key_length is 17 in InnoDB, 19 in Aria)
+1000000 * 17 / 0.75/ 4096 = 5533
+engine_blocks= 5533/4 = 1383
+
+(In reality we get 5293 blocks and 1323 engine blocks, because of the
+difference in InnoDB row count)
+
+cost= keyread_time= blocks * avg_io_cost() * cache + engine_blocks * INDEX_BLOCK_COPY_COST + rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST);
+->
+cost= engine_blocks * INDEX_BLOCK_COPY_COST + rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+
+Assuming INDEX_BLOCK_COPY_COST is same as in Aria:
+(Should probably be a bit higher as block_size in InnoDB is 16384
+compared to 8192 in Aria)
+
+cost= 1383 * 3.56e-05 + 1000000 * (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+=
+KEY_NEXT_FIND_COST + KEY_COPY_COST= (114.733037 - 1383 * 3.56e-05)/1000000
+=
+KEY_NEXT_FIND_COST= (114.733037 - 1383 * 3.56e-05)/1000000 - 0.000015685222496
+->
+KEY_NEXT_FIND_COST=0.000098998579704;
+
+Setting this makes InnoDB calculate the cost to 113.077711 (With estimate of
+988768 rows)
+If we had the right number of rows in ha_key_scan_time, we would
+have gotten a cost of:
+
+Last_query_cost: 145.077711 (Including WHERE cost for 988768 rows)
+(145.077711)/988768*1000000.0-32 = 114.72573444933
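+
+A Python sketch of that rescaling step (the 32 is the WHERE cost used
+above; est_rows is the row estimate InnoDB reported):
+
+last_query_cost = 113.077711 + 32                     # engine cost + WHERE cost
+est_rows, real_rows = 988768, 1000000
+print(last_query_cost / est_rows * real_rows - 32)    # ~114.73, close to the measured time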
+
+
+InnoDB RANGE SCAN
+=================
+
+select sum(l_orderkey) from check_costs_innodb force index(l_suppkey) where l_suppkey >= 0 and l_partkey >=0 and l_discount>=0.0
+range_scan 961.4857045 ms
+Note that InnoDB was reporting 495340 rows instead of 1000000!
+I added a patch to fix this and now InnoDB reports 990144 rows
+
+cost= keyread_time + rnd_pos_time.
+keyread_time is as above in index scan, but we want it without KEY_COPY_COST:
+keyread_time= cost - KEY_COPY_COST * 1000000=
+114.733037 - 0.000015685222496 * 1000000= 99.047814504000000
+rnd_pos_time= 961.4857045 - 99.047814504000000 = 862.437889996000000
+
+rnd_pos_time() = io_cost + engine_mem_cost +
+ rows * (ROW_LOOKUP_COST + ROW_COPY_COST) =
+rows * avg_io_cost() * engine_block_size/IO_SIZE +
+rows * INDEX_BLOCK_COPY_COST +
+rows * (ROW_COPY_COST + ROW_LOOKUP_COST)
+= (When rows are in memory)
+
+rows * (INDEX_BLOCK_COPY_COST + ROW_COPY_COST + ROW_LOOKUP_COST)
+
+This gives us:
+862.437889996000000 = 1000000 * 3.56e-05 + 1000000*(0.000060865547560 + ROW_LOOKUP_COST)
+->
+ROW_LOOKUP_COST= (862.437889996000000 - 1000000*(3.56e-05+0.000060865547560)) / 1000000
+->
+ROW_LOOKUP_COST= 0.000765972342436
+
+Setting this makes InnoDB calculate the cost to 961.081050 (good enough)
+
+
+InnoDB EQ_REF with index_read
+==============================
+
+select straight_join count(*) from seq_1_to_1000000,test.check_costs_innodb where seq=l_linenumber
+time: 854.980610 ms
+
+Here the engine first has to do a key lookup and copy the key to the upper
+level (Index only read).
+
+According to analyze statement:
+
+- Cost for SELECT * from seq_1_to_1000000: 12.57 (See sequence_scan_cost)
+- Time from check_costs: eq_ref_join: 854.980610
+ This is time for accessing both seq_1_to_1000000 and check_costs
+ time for check_cost_innodb: 854.980610-12.57 = 842.410610 ms
+
+cost= rows * (keyread_time(1,1) + KEY_COPY_COST)
+
+keyread_time(1,1)= INDEX_BLOCK_COPY_COST + ranges * KEY_LOOKUP_COST +
+ (rows-ranges) * KEY_NEXT_FIND_COST
+
+As rows=1 and ranges=1:
+
+keyread_time(1,1)= INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST
+
+cost= rows * (KEY_COPY_COST + INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST)
+->
+KEY_LOOKUP_COST= cost/rows - INDEX_BLOCK_COPY_COST - KEY_COPY_COST;
+842.410610 / 1000000 - 3.56e-05 - 0.000015685222496
+->
+KEY_LOOKUP_COST= 0.000791125387504;
+
+After the above we have
+last_query_cost=918.986438;
+
+The cost for check_costs_innodb =
+last_query_cost - sequence_scan_cost - where_cost*2 =
+918.986438 - 12.57 - 32*2 = 842.416438 (ok)
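+
+The same consistency check as a Python sketch (numbers from the analyze
+output above; 32 is the WHERE cost per 1M-row table):
+
+last_query_cost, sequence_scan_cost, where_cost = 918.986438, 12.57, 32.0
+print(last_query_cost - sequence_scan_cost - 2 * where_cost)   # ~842.42, vs measured 842.41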
+
+
+InnoDB EQ_REF with clustered index read
+========================================
+
+select straight_join count(*) from seq_1_to_1000000,check_costs_innodb where seq=l_orderkey
+eq_ref_cluster_join time: 972.290773 ms
+
+According to analyze statement:
+- Cost for SELECT * from seq_1_to_1000000: 12.57 (See sequence_scan_cost)
+- Time from check_costs: eq_ref_cluster_join: 972.290773 ms
+ This is time for accessing both seq_1_to_1000000 and check_costs_innodb.
+ Time for check_cost_innodb: 972.290773 - 12.57 = 959.790773
+
+The estimated cost is 875.0160
+
+cost= rows * (keyread_time(1,1) +
+ ranges * ROW_LOOKUP_COST +
+ (rows - ranges) * ROW_NEXT_FIND_COST +
+ rows * ROW_COPY_COST)
+
+As rows=1 and ranges=1:
+
+cost= rows * (INDEX_BLOCK_COPY_COST + ROW_LOOKUP_COST + ROW_COPY_COST);
+->
+ROW_LOOKUP_COST= cost/rows - INDEX_BLOCK_COPY_COST - ROW_COPY_COST;
+959.790773 / 1000000 - 3.56e-05 - 0.000060865547560
+->
+ROW_LOOKUP_COST= 0.0008633252254400001
+
+From InnoDB RANGE SCAN we have ROW_LOOKUP_COST=0.000765972342436
+From EQ_REF with index read we have KEY_LOOKUP_COST= 0.000791125387504,
+which should in theory be identical to ROW_LOOKUP_COST.
+
+For now we have to live with the difference (as I want to have the project done
+for the next release).
+
+The difference could come from the following things:
+
+- InnoDB estimation of rows in the range scan test is a bit off.
+- Maybe the work to find a row from an internal key entry is a bit
+  different from doing it through an external key (less checking/conversions).
+- Different keys are used for the range scan and this test, and they could
+  have different costs.
+- Maybe we should increase ROW_COPY_COST or ROW_LOOKUP_COST for InnoDB
+ and adjust other costs.
+
+
+Some background. In range scan, the cost is:
+- Scanning over all keys
+ - For each key, fetch row using rowid
+
+For the EQ_REF case:
+- Scan seq_1_to_1000000
+  for each value in seq
+  do an index_read() call
+
+
+Archive scan cost
+=================
+
+table_scan time: 757.390280 ms
+rows: 1000000
+file size: 32260650 = 7878 IO_SIZE blocks
+
+cost= scan_time() + TABLE_SCAN_SETUP_COST +
+ records * (ROW_COPY_COST + ROW_LOOKUP_COST + WHERE_COMPARE_COST);
+
+757.390280 = scan_time() + 10 + 1000000 * (0.060866+0.032000)/1000
+->
+scan_time()= 757.390280 - (10 + 1000000 * (0.060866+0.032000)/1000) = 654.52428
+
+scan_time() is defined as:
+
+cost.cpu= (blocks * DISK_READ_COST * DISK_READ_RATIO +
+ blocks * ARCHIVE_DECOMPRESS_TIME);
+
+Default values for above:
+blocks= 7878
+DISK_READ_COST: 10.240000 usec
+DISK_READ_RATIO= 0.20
+->
+ARCHIVE_DECOMPRESS_TIME= (654.52428 - (7878 * 10.240000/1000*0.2)) / 7878 =
+0.081034543792841
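+
+A Python sketch of the back-solve (numbers from above; the
+parenthesization follows the scan_time() calculation):
+
+blocks, disk_read_cost, disk_read_ratio = 7878, 10.240000, 0.2
+scan_ms = 757.390280 - (10 + 1000000 * (0.060866 + 0.032000) / 1000)   # ~654.52
+io_ms = blocks * disk_read_cost / 1000 * disk_read_ratio
+print((scan_ms - io_ms) / blocks)   # ~0.081 ms to decompress one block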
+
+
+MyRocksDB, TABLE SCAN
+=====================
+
+select sum(l_quantity) from check_costs_rocksdb;
+table_scan 213.038648 ms
+
+cost= blocks * avg_io_cost() *
+ optimizer_cache_cost * SCAN_LOOKUP_COST +
+ engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST));
+
+Some defaults:
+optimizer_cache_cost = 0
+index_block_copy_cost= 0.000035600 (Assume same as InnoDB)
+table_scan_setup_cost= 0 (Let's ignore it for now)
+row_copy_cost=0.000060865547560 (Assume same as InnoDB for now)
+
+SHOW TABLE STATUS tells us that data_length=64699000 = 15796 4K blocks.
+
+cost= engine_blocks * INDEX_BLOCK_COPY_COST +
+ TABLE_SCAN_SETUP_COST +
+ rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST))
+
+ROW_NEXT_FIND_COST=
+(costs - engine_blocks * INDEX_BLOCK_COPY_COST)/rows -
+ROW_COPY_COST
+= (213.03868 - 15796 * 0.000035600 - 0)/1000000 - 0.000060865547560 =
+0.00015161079484
+
+
+MyRocks INDEX SCAN
+==================
+
+select count(*) from test.check_costs_rocksdb force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0
+index_scan 266.80435 ms
+
+Note that MyRocks returns 2M rows for the table when it has only 1M rows!
+
+block_size= 8192
+key_length= 18
+compression=0.25 (75 %)
+blocks= (key_length * rows / 4 * block_size/4096) / IO_SIZE =
+(18 * 1000000 / 4 * 2) / 4096 = 2198 IO_BLOCKS (= 1094 engine_blocks)
+
+cost= keyread_time= blocks * avg_io_cost * DISK_READ_RATIO + engine_blocks * INDEX_BLOCK_COPY_COST + rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST);
+
+As we assume that everything is in memory (DISK_READ_RATIO=0)
+->
+cost= engine_blocks * INDEX_BLOCK_COPY_COST + rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST);
+
+Assuming INDEX_BLOCK_COPY_COST and KEY_COPY_COST are the same as in Aria and InnoDB:
+
+cost= 1094 * 3.56e-05 + 1000000 * (KEY_NEXT_FIND_COST + KEY_COPY_COST)
+=
+KEY_NEXT_FIND_COST + KEY_COPY_COST= (266.80435 - 1094 * 3.56e-05)/1000000
+=
+KEY_NEXT_FIND_COST= (266.80435 - 1094 * 3.56e-05)/1000000 - 0.000015685222496
+->
+KEY_NEXT_FIND_COST= 0.000251080181104
+
+
+MyRocks EQ_REF with index_read
+==============================
+
+select straight_join count(*) from seq_1_to_1000000,test.check_costs_rocksdb where seq=l_linenumber
+time: 857.548991
+
+Here the engine first has to do a key lookup and copy the key to the upper
+level (Index only read).
+
+According to analyze statement:
+
+- Cost for SELECT * from seq_1_to_1000000: 12.57 (See sequence_scan_cost)
+- Time from check_costs: eq_ref_join: 857.548991
+  This is time for accessing both seq_1_to_1000000 and check_costs_rocksdb.
+  Time for check_costs_rocksdb: 857.548991 - 12.57 = 844.978991 ms
+
+cost= rows * (keyread_time(1,1) + KEY_COPY_COST)
+
+keyread_time(1,1)= INDEX_BLOCK_COPY_COST + ranges * KEY_LOOKUP_COST +
+ (rows-ranges) * KEY_NEXT_FIND_COST
+
+As rows=1 and ranges=1:
+
+keyread_time(1,1)= INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST
+
+cost= rows * (KEY_COPY_COST + INDEX_BLOCK_COPY_COST + KEY_LOOKUP_COST)
+->
+KEY_LOOKUP_COST= cost/rows - INDEX_BLOCK_COPY_COST - KEY_COPY_COST;
+844.978991 / 1000000 - 3.56e-05 - 0.000015685222496 = 0.000793693768504
+
+
+MyRocks EQ_REF with clustered index read
+========================================
+
+select straight_join count(*) from seq_1_to_1000000,check_costs_rocksdb where seq=l_orderkey
+eq_ref_cluster_join 1613.5670 ms
+
+According to analyze statement:
+- Cost for SELECT * from seq_1_to_1000000: 12.57 (See sequence_scan_cost)
+- Time from check_costs: eq_ref_cluster_join: 1613.5670 ms
+  This is time for accessing both seq_1_to_1000000 and check_costs_rocksdb.
+  Time for check_costs_rocksdb: 1613.5670 - 12.57 = 1600.9970
+
+cost= rows * (keyread_time(1,1) +
+ ranges * ROW_LOOKUP_COST +
+ (rows - ranges) * ROW_NEXT_FIND_COST +
+ rows * ROW_COPY_COST)
+
+As rows=1 and ranges=1:
+
+cost= rows * (INDEX_BLOCK_COPY_COST + ROW_LOOKUP_COST + ROW_COPY_COST);
+->
+ROW_LOOKUP_COST= cost/rows - INDEX_BLOCK_COPY_COST - ROW_COPY_COST;
+1600.9970 / 1000000 - 3.56e-05 - 0.000060865547560 = 0.00150453145244
+
+
+MyRocks Range scan
+==================
+select sum(l_orderkey) from test.check_costs_rocksdb force index(l_suppkey) where l_suppkey >= 0 and l_partkey >=0 and l_discount>=0.0
+
+The MyRocks engine estimates the number of rows, both for the table and
+for the range, to be about 2M, double the real amount.
+
+The timing and costs from check_costs.pl are:
+
+range_scan time: 1845.06126 ms cost-where: 3698.8919 cost: 3730.8919
+
+As the cost is about double the time, this is as good as we can do
+until the record count reported by MyRocks is corrected.
+
+The issue with the wrongly estimated number of rows does not affect the
+other results from check_costs.pl, as the table scan estimates use the
+number of rows from ANALYZE, not from the engine.
+
+
+Appendix
+========
+
+Future improvements
+===================
+
+The current costs are quite good for tables of 1M rows (usually within
+about 10% of the true cost for the test table).
+
+For smaller tables the costs will be a bit on the high side, and for
+bigger tables a bit on the low side for eq_ref joins (both with index
+and with row lookup).
+
+The only engine that takes into account the number of rows for key lookups
+is heap with binary-tree indexes.
+
+Ideas of how to fix this:
+
+- Change KEY_LOOKUP_COST, INDEX_BLOCK_COPY_COST and ROW_LOOKUP_COST
+  (for clustered index) to take into account the height of the B-tree.
+
+
+Observations
+============
+
+Ratio between table scan and range scan
+
+Queries used:
+select sum(l_quantity) from check_costs_aria;
+select sum(l_orderkey) from test.check_costs_aria force index(l_suppkey) where l_suppkey >= 0 and l_partkey >=0 and l_discount>=0.0;
+
+The test for Aria shows that cost ratio of range_scan/table_scan are:
+disk_read_ratio=0 341.745207/139.348286= 2.4524536097
+disk_read_ratio=0.02 752.408528/145.748695= 5.1623688843
+disk_read_ratio=0.20 4448.378423/203.352382= 21.8752216190
+
+As we are using disk_read_ratio=0.02 by default, this means that for
+mtr not to use a table scan instead of a range scan, we have to ensure
+that the range does not cover more than 1/5 of the total rows.
+
+
+Trying to understand KEY_COPY_COST
+==================================
+
+An index scan with 2 and 4 key parts on an Aria table.
+The index has null key parts, so packed keys are used.
+
+Query1 "index_scan" (2 integer key parts, both key parts may have NULLS):
+select count(*) from $table force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0
+
+- Optimized build: Average 164 ms/query
+- gprof build: Average 465 ms/query
+
+[16] 51.2 0.00 0.21 3999987 handler::ha_index_next()
+[15] 51.2 0.01 0.20 3999993 maria_rnext [15]
+[22] 19.5 0.08 0.00 9658527 _ma_get_pack_key [22]
+
+This means that for 3999987 read next calls, the time of _ma_get_pack_key
+to retrieve the returned key is:
+0.08 * (3999987/9658527)
+
+The relation of KEY_COPY_COST to KEY_NEXT_FIND_COST is thus for Aria:
+
+0.08 * (3999987/9658527)/0.21 = 0.15777 parts of KEY_NEXT_FIND_COST
+
+------
+
+Query 2 "index_scan_4_parts" (4 integer key parts, 2 parts may have NULL's):
+select count(*) from $table force index (long_suppkey) where l_linenumber >= 0 and l_extra >0
+
+- Optimized build: 218 ms
+- gprof build: Average 497 ms/query
+
+Most costly functions
+ % cumulative self self total
+ time seconds seconds calls ms/call ms/call name
+ 13.44 0.61 0.61 48292742 0.00 0.00 _ma_get_pack_key
+ 8.59 1.00 0.39 28298101 0.00 0.00 ha_key_cmp
+ 7.27 1.33 0.33 19999951 0.00 0.00 _ma_put_key_in_record
+ 4.41 1.96 0.20 19999952 0.00 0.00 handler::ha_index_next(unsigned char*)
+
+Call graph
+[13] 9.0 0.20 0.21 19999952 handler::ha_index_next(unsigned char*) [13]
+
+[3] 21.6 0.16 0.82 19999960 _ma_search_next [3]
+[18] 7.7 0.02 0.33 19999951 _ma_read_key_record [18]
+ 0.00 0.00 19887291/19999952 _ma_get_static_key [6565][19]
+ 18.4 0.10 0.64 19999936 Item_cond_and::val_int() [19]
+
+-> KEY_COPY_COST = 1.33/1.96 = 0.6785 parts of the index_read_next
+
+Total cost increase from 2 -> 4 key parts = 1.96 / 1.40 = 40%
+This includes the additional work in having more key pages, more work in
+finding the next key (if key parts are packed or possibly NULL), and copying
+the key parts to the record.
+
+I also did a quick comparison with NOT NULL keys, in which case
+Aria can use fixed key lengths. This gives a 39.4% speedup on index
+scan, a small speedup on table scan (as 2 fields cannot be NULL),
+but no notable speedup for anything else.
+
+
+Trying to understand ROW_COPY_COST
+==================================
+
+A simple table scan on an Aria table.
+
+query: select sum(l_quantity) from check_costs_aria
+
+From gprof running the above query 10 times with 1M rows in the table:
+
+[14] 83.7 0.03 0.76 9999989 handler::ha_rnd_next()
+[17] 51.6 0.49 0.00 10000010 _ma_read_block_record2 [17]
+[18] 21.1 0.01 0.19 156359 pagecache_read [18]
+
+The function that unpacks the row is _ma_read_block_record2()
+
+Taking into account that all pages are cached:
+(Note that the main cost in pagecache_read in this test is calculating the page
+checksum)
+
+ROW_COPY_COST/ROW_NEXT_FIND_COST= 0.49/(0.76+0.3-0.20) = 0.56977 = 0.57
+
+
+Reason for SCAN_SETUP_COSTS
+===========================
+
+One problem with the new, more exact cost model is that the optimizer
+starts to use table scans much more for small tables (which is correct
+when one looks only at the cost). However, small tables are usually fully
+cached, so it is still better to use an index scan in many cases.
+
+This problem is especially notable in mtr, where most test cases use
+tables with very few rows.
+
+TABLE_SCAN_SETUP_COST is used to add a constant startup cost for
+table and index scans. It is by default set to 10 usec, about 10 MyISAM
+row reads.
+
+The following cost calculation shows why this is needed:
+
+explain select count(*) from t1, t2 where t1.p = t2.i
++------+-------------+-------+-------+---------------+---------+---------+-----------+------+-------------+
+| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra |
++------+-------------+-------+-------+---------------+---------+---------+-----------+------+-------------+
+| 1 | SIMPLE | t1 | index | PRIMARY | PRIMARY | 4 | NULL | 2 | Using index |
+| 1 | SIMPLE | t2 | ref | k1 | k1 | 5 | test.t1.p | 2 | Using index |
++------+-------------+-------+-------+---------------+---------+---------+-----------+------+-------------+
+
+t1 has 2 rows
+t2 has 4 rows
+
+Optimizer trace shows when using TABLE_SCAN_SETUP_COST=0:
+
+index scan costs:
+"read_cost": 0.00308962,
+"read_and_compare_cost": 0.00321762
+
+key read costs:
+"rows": 2,
+"cost": 0.00567934
+
+CHOSEN:
+Scan with join cache: "cost": 0.0038774
+"rows_after_scan": 2
+
+Note that in the following, we are using cost in microseconds while
+the above costs are in milliseconds.
+
+select * from information_schema.optimizer_costs where engine="myisam"\G
+ ENGINE: MyISAM
+ OPTIMIZER_DISK_READ_COST: 10.240000
+ OPTIMIZER_INDEX_BLOCK_COPY_COST: 0.035600
+ OPTIMIZER_KEY_COMPARE_COST: 0.008000
+ OPTIMIZER_KEY_COPY_COST: 0.066660
+ OPTIMIZER_KEY_LOOKUP_COST: 0.498540
+ OPTIMIZER_KEY_NEXT_FIND_COST: 0.060210
+ OPTIMIZER_DISK_READ_RATIO: 0.200000
+OPTIMIZER_RND_POS_INTERFACE_COST: 0.000000
+ OPTIMIZER_ROW_COPY_COST: 0.088630
+ OPTIMIZER_ROW_LOOKUP_COST: 0.641150
+ OPTIMIZER_ROW_NEXT_FIND_COST: 0.049510
+ OPTIMIZER_ROWID_COMPARE_COST: 0.004000
+@@OPTIMIZER_SCAN_SETUP_COST 10.000000
+@@OPTIMIZER_WHERE_COST 0.032000
+
+Checking the calculated costs:
+
+index_scan_cost= 10.240000 * 0.2 + 0.035600 + 0.498540 + 4 * (0.060210+0.066660) = 3.08962
+where_cost 0.032000*4= 0.128000
+total: 3.21762
+
+key_read_cost= 10.240000 * 0.2 + 0.035600 + 0.498540 + 0.060210 = 2.64235
+key_copy_cost= 0.066660 * 2 = 0.13332
+where_cost 0.032000*2= 0.06400
+total: 2.64235 + 0.13332 + 0.06400 = 2.83967
+Needs to be done 2 times (2 rows in t1): 5.67934
+
+Join cache only needs 1 refill. The calculation is done in
+sql_select.cc:best_access_path()
+
+scan_with_join_cache=
+scan_time + cached_combinations * ROW_COPY_COST * JOIN_CACHE_COST +
+row_combinations * (ROW_COPY_COST * JOIN_CACHE_COST + WHERE_COST) =
+3.2176 + 2 * 0.088630 + 2*2 * (0.088630 * 1 + 0.032000) =
+3.87738
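+
+A small Python sketch that reproduces the three costs above from the
+MyISAM constants (values in microseconds; JOIN_CACHE_COST taken as 1,
+as in the calculation above):
+
+DISK_READ, RATIO, BLOCK_COPY = 10.24, 0.2, 0.0356
+KEY_LOOKUP, KEY_NEXT, KEY_COPY = 0.49854, 0.06021, 0.06666
+ROW_COPY, WHERE = 0.08863, 0.032
+
+index_scan = DISK_READ*RATIO + BLOCK_COPY + KEY_LOOKUP + 4*(KEY_NEXT + KEY_COPY) + 4*WHERE
+key_read   = 2 * (DISK_READ*RATIO + BLOCK_COPY + KEY_LOOKUP + KEY_NEXT + 2*KEY_COPY + 2*WHERE)
+join_cache = index_scan + 2*ROW_COPY + 4*(ROW_COPY + WHERE)
+print(index_scan, key_read, join_cache)   # ~3.22, ~5.68, ~3.88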
+
+Other observations:
+OPTIMIZER_KEY_NEXT_FIND_COST + OPTIMIZER_KEY_COPY_COST + OPTIMIZER_WHERE_COST=
+0.060210 + 0.066660 + 0.032000 = 0.158870
+OPTIMIZER_KEY_LOOKUP_COST / 0.158870 = 3.138
+
+This means that when using index only reads (and DISK_READ_RATIO=0)
+the optimizer will prefer to read up to 3 times more keys in a range or
+ref scan than doing separate key lookups!
+If DISK_READ_RATIO is higher, the above ratio increases. This is one of
+the reasons why we set the default value for DISK_READ_RATIO quite low
+(0.02 now)
+
+(OPTIMIZER_ROW_COPY_COST + OPTIMIZER_ROW_NEXT_FIND_COST) /
+(OPTIMIZER_KEY_COPY_COST + OPTIMIZER_KEY_NEXT_FIND_COST) =
+(0.088630 + 0.049510) / (0.066660 + 0.060210) = 1.08831
+Which means that table scans and index scans have almost the same cost.
+
+
+HEAP_TEMPTABLE_CREATE_COST
+==========================
+
+I added trackers in create_tmp_table() and open_tmp_table() and ran a
+simple query that creates two materialized temporary tables with a unique
+index 31 times. I got the following tracking information:
+
+(gdb) p open_tracker
+$1 = {counter = 31, cycles = 302422}
+(gdb) p create_tracker
+$2 = {counter = 31, cycles = 1479836}
+
+Cycles per create = (302422 + 1479836)/31= 57492
+
+1000.0*57492/sys_timer_info.cycles.frequency = 0.0249 ms
+HEAP_TEMPTABLE_CREATE_COST= 0.025 ms
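+
+The cycles-to-milliseconds conversion as a Python sketch (the 2.3 GHz
+clock is an assumption; use the sys_timer frequency of your own box):
+
+cycles_per_create = (302422 + 1479836) / 31   # ~57492
+cpu_hz = 2.3e9                                # assumed clock frequency
+print(1000.0 * cycles_per_create / cpu_hz)    # ~0.025 ms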
+
+
+What to do with wrong row estimates
+===================================
+
+MyRocks can have a very bad estimate of rows, both for the number of rows
+in the table and for big ranges. ANALYZE TABLE can fix this, but we have to
+consider how to keep the row estimate correct when tables grow over time.
+
+Suggested fix:
+- If we can assume that the datafile size reported by the engine is somewhat
+  correct, we could estimate the number of rows (see the sketch below) as:
+  analyze_number_of_rows * current_datafile_size / analyze_datafile_size
+
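+A minimal Python sketch of the suggested scaling (hypothetical helper,
+not existing server code):
+
+def estimated_rows(analyze_rows, analyze_datafile_size, current_datafile_size):
+    # Scale the row count from the last ANALYZE by the growth of the data file
+    return analyze_rows * current_datafile_size / analyze_datafile_size
+
+print(estimated_rows(1000000, 64699000, 2 * 64699000))   # file doubled -> ~2M rows
+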
+
+MySQL cost structures
+=====================
+
+MySQL 8.0 server costs are stored in the class Server_cost_constants, defined
+in opt_costconstants.h.
+
+It contains the following slots and has the following default values:
+
+m_row_evaluate_cost 0.1 Cost for evaluating the query condition on
+ a row
+m_key_compare_cost 0.05 Cost for comparing two keys
+m_memory_temptable_create_cost 1.0 Cost for creating an internal temporary
+ table in memory
+m_memory_temptable_row_cost 0.1 Cost for retrieving or storing a row in an
+ internal temporary table stored in memory.
+m_disk_temptable_create_cost 20.0 Cost for creating an internal temporary
+ table in a disk resident storage engine.
+m_disk_temptable_row_cost 0.5 Cost for retrieving or storing a row in an
+ internal disk resident temporary table.
+
+Engine cost variables:
+m_memory_block_read_cost 0.25 The cost of reading a block from a main
+ memory buffer pool
+m_io_block_read_cost 1.0 The cost of reading a block from an
+ IO device (disk)
+
+-------
+
+Some cost functions:
+
+scan_time() = data_file_length / IO_SIZE + 2;
+read_time(index, ranges, rows)= rows2double(ranges + rows);
+index_only_read_time()= records / keys_per_block
+
+table_scan_cost()= scan_time() * page_read_cost(1.0);
+
+index_scan_cost()= index_only_read_time(index, rows) *
+ page_read_cost_index(index, 1.0);
+read_cost()= read_time() * page_read_cost(1.0);
+
+
+page_read_cost()= buffer_block_read_cost(pages_in_mem) +
+ io_block_read_cost(pages_on_disk);
+
+io_block_read_cost()= blocks * m_io_block_read_cost
+buffer_block_read_cost()= blocks * m_memory_block_read_cost;
+
+
+There are also:
+table_in_memory_estimate()
+index_in_memory_estimate()
+
+If the storage engine is not providing estimates for the above, then
+the estimates are done based on table size (not depending on how many
+rows are going to be accessed in the table).
diff --git a/VERSION b/VERSION
index d26d4fbd86c..26f2d389e50 100644
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,4 @@
-MYSQL_VERSION_MAJOR=10
-MYSQL_VERSION_MINOR=11
-MYSQL_VERSION_PATCH=3
-SERVER_MATURITY=stable
+MYSQL_VERSION_MAJOR=11
+MYSQL_VERSION_MINOR=0
+MYSQL_VERSION_PATCH=2
+SERVER_MATURITY=gamma
diff --git a/client/mariadb-conv.cc b/client/mariadb-conv.cc
index 1774debe5f9..0e35812d9eb 100644
--- a/client/mariadb-conv.cc
+++ b/client/mariadb-conv.cc
@@ -20,13 +20,12 @@
Character set conversion utility
*/
+#define VER "1.0"
#include "mariadb.h"
#include "client_priv.h"
#include "sql_string.h"
#include "my_dir.h"
-
-#define CONV_VERSION "1.0"
-
+#include <welcome_copyright_notice.h>
class CmdOpt
{
@@ -415,8 +414,7 @@ public:
}
void usage(void)
{
- printf("%s Ver %s Distrib %s for %s on %s\n", my_progname, CONV_VERSION,
- MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
+ print_version();
puts("Character set conversion utility for MariaDB");
puts("Usage:");
printf("%s [OPTION...] [FILE...]\n", my_progname);
diff --git a/client/mysql.cc b/client/mysql.cc
index 478b8ed4ac9..eb6508903b6 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -46,7 +46,7 @@
#include <locale.h>
#endif
-const char *VER= "15.1";
+const char *VER= "15.2";
/* Don't try to make a nice table if the data is too big */
#define MAX_COLUMN_LENGTH 1024
@@ -246,7 +246,7 @@ static my_bool ignore_errors=0,wait_flag=0,quick=0,
tty_password= 0, opt_nobeep=0, opt_reconnect=1,
opt_secure_auth= 0,
default_pager_set= 0, opt_sigint_ignore= 0,
- auto_vertical_output= 0,
+ auto_vertical_output= 0, show_query_cost= 0,
show_warnings= 0, executing_query= 0,
ignore_spaces= 0, opt_binhex= 0, opt_progress_reports;
static my_bool debug_info_flag, debug_check_flag, batch_abort_on_error;
@@ -324,6 +324,7 @@ static int com_quit(String *str,char*),
com_notee(String *str, char*), com_charset(String *str,char*),
com_prompt(String *str, char*), com_delimiter(String *str, char*),
com_warnings(String *str, char*), com_nowarnings(String *str, char*);
+static int com_query_cost(String *str, char*);
#ifdef USE_POPEN
static int com_nopager(String *str, char*), com_pager(String *str, char*),
@@ -395,6 +396,8 @@ static COMMANDS commands[] = {
{ "print", 'p', com_print, 0, "Print current command." },
{ "prompt", 'R', com_prompt, 1, "Change your mysql prompt."},
{ "quit", 'q', com_quit, 0, "Quit mysql." },
+ { "costs", 'Q', com_query_cost, 0,
+ "Toggle showing query costs after each query" },
{ "rehash", '#', com_rehash, 0, "Rebuild completion hash." },
{ "source", '.', com_source, 1,
"Execute an SQL script file. Takes a file name as an argument."},
@@ -1156,6 +1159,7 @@ static void print_table_data_xml(MYSQL_RES *result);
static void print_tab_data(MYSQL_RES *result);
static void print_table_data_vertically(MYSQL_RES *result);
static void print_warnings(void);
+static void print_last_query_cost(void);
static void end_timer(ulonglong start_time, char *buff);
static void nice_time(double sec,char *buff,bool part_second);
extern "C" sig_handler mysql_end(int sig) __attribute__ ((noreturn));
@@ -1337,26 +1341,38 @@ int main(int argc,char *argv[])
initialize_readline();
if (!status.batch && !quick && !opt_html && !opt_xml)
{
- /* read-history from file, default ~/.mysql_history*/
- if (getenv("MYSQL_HISTFILE"))
- histfile=my_strdup(PSI_NOT_INSTRUMENTED, getenv("MYSQL_HISTFILE"),MYF(MY_WME));
- else if (getenv("HOME"))
+ const char *home;
+ /* read-history from file, default ~/.mariadb_history*/
+ if ((histfile= getenv("MARIADB_HISTFILE"))
+ || (histfile= getenv("MYSQL_HISTFILE")))
+ histfile=my_strdup(PSI_NOT_INSTRUMENTED, histfile, MYF(MY_WME));
+ else if ((home= getenv("HOME")))
{
histfile=(char*) my_malloc(PSI_NOT_INSTRUMENTED,
- strlen(getenv("HOME")) + strlen("/.mysql_history")+2, MYF(MY_WME));
+ strlen(home) + strlen("/.mariadb_history")+2, MYF(MY_WME));
if (histfile)
- sprintf(histfile,"%s/.mysql_history",getenv("HOME"));
- char link_name[FN_REFLEN];
- if (my_readlink(link_name, histfile, 0) == 0 &&
- strncmp(link_name, "/dev/null", 10) == 0)
{
- /* The .mysql_history file is a symlink to /dev/null, don't use it */
- my_free(histfile);
- histfile= 0;
+ sprintf(histfile,"%s/.mariadb_history", home);
+ if (my_access(histfile, F_OK))
+ {
+ /* no .mariadb_history, look for historical name and use if present */
+ sprintf(histfile,"%s/.mysql_history", home);
+ /* and go back to original if not found */
+ if (my_access(histfile, F_OK))
+ sprintf(histfile,"%s/.mariadb_history", home);
+ }
+ char link_name[FN_REFLEN];
+ if (my_readlink(link_name, histfile, 0) == 0 &&
+ strncmp(link_name, "/dev/null", 10) == 0)
+ {
+ /* The .mariadb_history file is a symlink to /dev/null, don't use it */
+ my_free(histfile);
+ histfile= 0;
+ }
}
}
- /* We used to suggest setting MYSQL_HISTFILE=/dev/null. */
+ /* We used to suggest setting MARIADB_HISTFILE=/dev/null. */
if (histfile && strncmp(histfile, "/dev/null", 10) == 0)
histfile= NULL;
@@ -1816,6 +1832,10 @@ static struct my_option my_long_options[] =
{"show-warnings", OPT_SHOW_WARNINGS, "Show warnings after every statement.",
&show_warnings, &show_warnings, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
+ {"show-query-costs", OPT_SHOW_WARNINGS,
+ "Show query cost after every statement.",
+ &show_query_cost, &show_query_cost, 0, GET_BOOL, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
{"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.",
&opt_plugin_dir, &opt_plugin_dir, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -1850,12 +1870,12 @@ static void usage(int version)
#else
const char* readline= "readline";
#endif
- printf("%s Ver %s Distrib %s, for %s (%s) using %s %s\n",
- my_progname, VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE,
+ printf("%s from %s, client %s for %s (%s) using %s %s\n",
+ my_progname, MYSQL_SERVER_VERSION, VER, SYSTEM_TYPE, MACHINE_TYPE,
readline, rl_library_version);
#else
- printf("%s Ver %s Distrib %s, for %s (%s), source revision %s\n", my_progname, VER,
- MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE,SOURCE_REVISION);
+ printf("%s from %s, client %s for %s (%s), source revision %s\n", my_progname,
+ MYSQL_SERVER_VERSION, VER, SYSTEM_TYPE, MACHINE_TYPE,SOURCE_REVISION);
#endif
if (version)
@@ -3574,6 +3594,8 @@ end:
/* Show warnings if any or error occurred */
if (show_warnings == 1 && (warnings >= 1 || error))
print_warnings();
+ if (show_query_cost)
+ print_last_query_cost();
if (!error && !status.batch &&
(mysql.server_status & SERVER_STATUS_DB_DROPPED))
@@ -4181,6 +4203,33 @@ end:
}
+/* print_last_query_cost */
+
+static void print_last_query_cost()
+{
+ const char *query;
+ char *end;
+ MYSQL_RES *result;
+ MYSQL_ROW cur;
+
+ query= "show status like 'last_query_cost'";
+ mysql_real_query_for_lazy(query, strlen(query));
+ mysql_store_result_for_lazy(&result);
+ if (!result)
+ goto end;
+
+ cur= mysql_fetch_row(result);
+ if (strtod(cur[1], &end) != 0.0)
+ {
+ init_pager();
+ tee_fprintf(PAGER, "%s: %s\n\n", cur[0], cur[1]);
+ }
+
+end:
+ mysql_free_result(result);
+}
+
+
static const char *array_value(const char **array, char key)
{
for (; *array; array+= 2)
@@ -4756,6 +4805,18 @@ com_nowarnings(String *buffer __attribute__((unused)),
return 0;
}
+static int
+com_query_cost(String *buffer __attribute__((unused)),
+ char *line __attribute__((unused)))
+{
+ show_query_cost= 1 - show_query_cost;
+ if (show_query_cost)
+ put_info("Last_query_cost enabled.",INFO_INFO);
+ else
+ put_info("Last_query_cost disabled.",INFO_INFO);
+ return 0;
+}
+
/*
Gets argument from a command on the command line. If mode is not GET_NEXT,
skips the command and returns the first argument. The line is modified by
@@ -5011,6 +5072,10 @@ com_status(String *buffer __attribute__((unused)),
ulonglong id;
MYSQL_RES *UNINIT_VAR(result);
+ /*
+ Don't remove "limit 1",
+ it is protection against SQL_SELECT_LIMIT=0
+ */
if (mysql_real_query_for_lazy(
C_STRING_WITH_LEN("select DATABASE(), USER() limit 1")))
return 0;
@@ -5018,10 +5083,6 @@ com_status(String *buffer __attribute__((unused)),
tee_puts("--------------", stdout);
usage(1); /* Print version */
tee_fprintf(stdout, "\nConnection id:\t\t%lu\n",mysql_thread_id(&mysql));
- /*
- Don't remove "limit 1",
- it is protection against SQL_SELECT_LIMIT=0
- */
if (!mysql_store_result_for_lazy(&result))
{
MYSQL_ROW cur=mysql_fetch_row(result);
diff --git a/client/mysql_plugin.c b/client/mysql_plugin.c
index dab98b10043..ad50f64d663 100644
--- a/client/mysql_plugin.c
+++ b/client/mysql_plugin.c
@@ -15,17 +15,14 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*/
+#define VER "1.0"
#include <my_global.h>
#include <m_string.h>
#include <mysql.h>
#include <my_getopt.h>
#include <my_dir.h>
#include <mysql_version.h>
-
-#define SHOW_VERSION "1.0.0"
-#define PRINT_VERSION do { printf("%s Ver %s Distrib %s\n", \
- my_progname, SHOW_VERSION, MYSQL_SERVER_VERSION); \
- } while(0)
+#include <welcome_copyright_notice.h>
/* Global variables. */
static uint my_end_arg= 0;
@@ -418,7 +415,7 @@ exit:
static void usage(void)
{
- PRINT_VERSION;
+ print_version();
puts("Copyright (c) 2011, 2015, Oracle and/or its affiliates. "
"All rights reserved.\n");
puts("Enable or disable plugins.");
@@ -504,7 +501,7 @@ get_one_option(const struct my_option *opt,
opt_verbose++;
break;
case 'V':
- PRINT_VERSION;
+ print_version();
exit(0);
break;
case '?':
diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c
index d026c347c82..4275fc774d4 100644
--- a/client/mysql_upgrade.c
+++ b/client/mysql_upgrade.c
@@ -20,9 +20,8 @@
#include <sslopt-vars.h>
#include <../scripts/mysql_fix_privilege_tables_sql.c>
-#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
-
#define VER "2.0"
+#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
@@ -296,8 +295,7 @@ get_one_option(const struct my_option *opt, const char *argument,
switch (opt->id) {
case '?':
- printf("%s Ver %s Distrib %s, for %s (%s)\n",
- my_progname, VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
+ print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
puts("MariaDB utility for upgrading databases to new MariaDB versions.");
print_defaults("my", load_default_groups);
@@ -702,7 +700,7 @@ static int get_upgrade_info_file_name(char* name)
dynstr_free(&ds_datadir);
- fn_format(name, "mysql_upgrade_info", name, "", MYF(0));
+ fn_format(name, "mariadb_upgrade_info", name, "", MYF(0));
DBUG_PRINT("exit", ("name: %s", name));
DBUG_RETURN(0);
}
@@ -711,7 +709,7 @@ static char upgrade_info_file[FN_REFLEN]= {0};
/*
- Open or create mysql_upgrade_info file in servers data dir.
+ Open or create mariadb_upgrade_info file in servers data dir.
Take a lock to ensure there cannot be any other mysql_upgrades
running concurrently
@@ -726,10 +724,19 @@ const char *create_error_message=
static void open_mysql_upgrade_file()
{
char errbuff[80];
+ char old_upgrade_info_file[FN_REFLEN]= {0};
+ size_t path_len;
+
if (get_upgrade_info_file_name(upgrade_info_file))
{
die("Upgrade failed");
}
+
+ // Delete old mysql_upgrade_info file
+ dirname_part(old_upgrade_info_file, upgrade_info_file, &path_len);
+ fn_format(old_upgrade_info_file, "mysql_upgrade_info", old_upgrade_info_file, "", MYF(0));
+ my_delete(old_upgrade_info_file, MYF(MY_IGNORE_ENOENT));
+
if ((info_file= my_create(upgrade_info_file, 0,
O_RDWR | O_NOFOLLOW,
MYF(0))) < 0)
@@ -786,7 +793,7 @@ static int faulty_server_versions(const char *version)
}
/*
- Read the content of mysql_upgrade_info file and
+ Read the content of mariadb_upgrade_info file and
compare the version number form file against
version number which mysql_upgrade was compiled for
@@ -863,16 +870,16 @@ static int upgrade_already_done(int silent)
if (!silent)
{
verbose("This installation of MariaDB is already upgraded to %s.\n"
- "There is no need to run mysql_upgrade again for %s.",
+ "There is no need to run mariadb-upgrade again for %s.",
upgrade_from_version, version);
if (!opt_check_upgrade)
- verbose("You can use --force if you still want to run mysql_upgrade",
+ verbose("You can use --force if you still want to run mariadb-upgrade",
upgrade_from_version, version);
}
return 0;
}
-static void finish_mysql_upgrade_info_file(void)
+static void finish_mariadb_upgrade_info_file(void)
{
if (info_file < 0)
return;
@@ -1465,7 +1472,7 @@ int main(int argc, char **argv)
printf("The --upgrade-system-tables option was used, user tables won't be touched.\n");
/*
- Read the mysql_upgrade_info file to check if mysql_upgrade
+ Read the mariadb_upgrade_info file to check if mysql_upgrade
already has been run for this installation of MariaDB
*/
if (!opt_force && !upgrade_already_done(0))
@@ -1497,7 +1504,7 @@ int main(int argc, char **argv)
verbose("OK");
/* Finish writing indicating upgrade has been performed */
- finish_mysql_upgrade_info_file();
+ finish_mariadb_upgrade_info_file();
DBUG_ASSERT(phase == phases_total);
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index 38b3be837bc..1b99b5ce085 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -17,6 +17,7 @@
/* maintenance of mysql databases */
+#define VER "9.1"
#include "client_priv.h"
#include <signal.h>
#include <my_pthread.h> /* because of signal() */
@@ -28,7 +29,6 @@
#include <password.h>
#include <my_sys.h>
-#define ADMIN_VERSION "9.1"
#define MAX_MYSQL_VAR 512
#define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */
#define MAX_TRUNC_LENGTH 3
@@ -1389,13 +1389,6 @@ static char **mask_password(int argc, char ***argv)
return(temp_argv);
}
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s on %s\n",my_progname,ADMIN_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 67cf008d732..d7fc64d730a 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -29,6 +29,7 @@
Format_desc_of_slave, Rotate_of_master, Format_desc_of_master.
*/
+#define VER "3.5"
#define MYSQL_CLIENT
#undef MYSQL_SERVER
#define TABLE TABLE_CLIENT
@@ -80,7 +81,7 @@ DYNAMIC_ARRAY events_in_stmt; // Storing the events that in one statement
String stop_event_string; // Storing the STOP_EVENT output string
extern "C" {
-char server_version[SERVER_VERSION_LENGTH];
+char server_version[SERVER_VERSION_LENGTH]="5.0.0";
}
static char *server_id_str;
@@ -276,16 +277,10 @@ class Load_log_processor
When we see first event corresponding to some LOAD DATA statement in
binlog, we create temporary file to store data to be loaded.
We add name of this file to file_names array using its file_id as index.
- If we have Create_file event (i.e. we have binary log in pre-5.0.3
- format) we also store save event object to be able which is needed to
- emit LOAD DATA statement when we will meet Exec_load_data event.
- If we have Begin_load_query event we simply store 0 in
- File_name_record::event field.
*/
struct File_name_record
{
char *fname;
- Create_file_log_event *event;
};
/*
@todo Should be a map (e.g., a hash map), not an array. With the
@@ -355,7 +350,6 @@ public:
if (ptr->fname)
{
my_free(ptr->fname);
- delete ptr->event;
bzero((char *)ptr, sizeof(File_name_record));
}
}
@@ -364,34 +358,6 @@ public:
}
/**
- Obtain Create_file event for LOAD DATA statement by its file_id
- and remove it from this Load_log_processor's list of events.
-
- Checks whether we have already seen a Create_file_log_event with
- the given file_id. If yes, returns a pointer to the event and
- removes the event from array describing active temporary files.
- From this moment, the caller is responsible for freeing the memory
- occupied by the event.
-
- @param[in] file_id File id identifying LOAD DATA statement.
-
- @return Pointer to Create_file_log_event, or NULL if we have not
- seen any Create_file_log_event with this file_id.
- */
- Create_file_log_event *grab_event(uint file_id)
- {
- File_name_record *ptr;
- Create_file_log_event *res;
-
- if (file_id >= file_names.elements)
- return 0;
- ptr= dynamic_element(&file_names, file_id, File_name_record*);
- if ((res= ptr->event))
- bzero((char *)ptr, sizeof(File_name_record));
- return res;
- }
-
- /**
Obtain file name of temporary file for LOAD DATA statement by its
file_id and remove it from this Load_log_processor's list of events.
@@ -414,125 +380,19 @@ public:
if (file_id >= file_names.elements)
return 0;
ptr= dynamic_element(&file_names, file_id, File_name_record*);
- if (!ptr->event)
- {
- res= ptr->fname;
- bzero((char *)ptr, sizeof(File_name_record));
- }
+ res= ptr->fname;
+ bzero((char *)ptr, sizeof(File_name_record));
return res;
}
- Exit_status process(Create_file_log_event *ce);
- Exit_status process(Begin_load_query_log_event *ce);
+ Exit_status process(Begin_load_query_log_event *blqe);
Exit_status process(Append_block_log_event *ae);
- File prepare_new_file_for_old_format(Load_log_event *le, char *filename);
- Exit_status load_old_format_file(NET* net, const char *server_fname,
- uint server_fname_len, File file);
Exit_status process_first_event(const char *bname, size_t blen,
const uchar *block,
- size_t block_len, uint file_id,
- Create_file_log_event *ce);
+ size_t block_len, uint file_id);
};
/**
- Creates and opens a new temporary file in the directory specified by previous call to init_by_dir_name() or init_by_cur_dir().
-
- @param[in] le The basename of the created file will start with the
- basename of the file pointed to by this Load_log_event.
-
- @param[out] filename Buffer to save the filename in.
-
- @return File handle >= 0 on success, -1 on error.
-*/
-File Load_log_processor::prepare_new_file_for_old_format(Load_log_event *le,
- char *filename)
-{
- size_t len;
- char *tail;
- File file;
-
- fn_format(filename, le->fname, target_dir_name, "", MY_REPLACE_DIR);
- len= strlen(filename);
- tail= filename + len;
-
- if ((file= create_unique_file(filename,tail)) < 0)
- {
- error("Could not construct local filename %s.",filename);
- return -1;
- }
-
- le->set_fname_outside_temp_buf(filename,len+strlen(tail));
-
- return file;
-}
-
-
-/**
- Reads a file from a server and saves it locally.
-
- @param[in,out] net The server to read from.
-
- @param[in] server_fname The name of the file that the server should
- read.
-
- @param[in] server_fname_len The length of server_fname.
-
- @param[in,out] file The file to write to.
-
- @retval ERROR_STOP An error occurred - the program should terminate.
- @retval OK_CONTINUE No error, the program should continue.
-*/
-Exit_status Load_log_processor::load_old_format_file(NET* net,
- const char*server_fname,
- uint server_fname_len,
- File file)
-{
- uchar buf[FN_REFLEN+1];
- buf[0] = 0;
- memcpy(buf + 1, server_fname, server_fname_len + 1);
- if (my_net_write(net, buf, server_fname_len +2) || net_flush(net))
- {
- error("Failed requesting the remote dump of %s.", server_fname);
- return ERROR_STOP;
- }
-
- for (;;)
- {
- ulong packet_len = my_net_read(net);
- if (packet_len == 0)
- {
- if (my_net_write(net, (uchar*) "", 0) || net_flush(net))
- {
- error("Failed sending the ack packet.");
- return ERROR_STOP;
- }
- /*
- we just need to send something, as the server will read but
- not examine the packet - this is because mysql_load() sends
- an OK when it is done
- */
- break;
- }
- else if (packet_len == packet_error)
- {
- error("Failed reading a packet during the dump of %s.", server_fname);
- return ERROR_STOP;
- }
-
- if (packet_len > UINT_MAX)
- {
- error("Illegal length of packet read from net.");
- return ERROR_STOP;
- }
- if (my_write(file, net->read_pos, (uint) packet_len, MYF(MY_WME|MY_NABP)))
- return ERROR_STOP;
- }
-
- return OK_CONTINUE;
-}
-
-
-/**
Process the first event in the sequence of events representing a
LOAD DATA statement.
@@ -555,8 +415,7 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
size_t blen,
const uchar *block,
size_t block_len,
- uint file_id,
- Create_file_log_event *ce)
+ uint file_id)
{
size_t full_len= target_dir_name_len + blen + 9 + 9 + 1;
Exit_status retval= OK_CONTINUE;
@@ -568,7 +427,6 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
if (!(fname= (char*) my_malloc(PSI_NOT_INSTRUMENTED, full_len,MYF(MY_WME))))
{
error("Out of memory.");
- delete ce;
DBUG_RETURN(ERROR_STOP);
}
@@ -583,12 +441,10 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
error("Could not construct local filename %s%s.",
target_dir_name,bname);
my_free(fname);
- delete ce;
DBUG_RETURN(ERROR_STOP);
}
rec.fname= fname;
- rec.event= ce;
/*
fname is freed in process_event()
@@ -599,13 +455,9 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
{
error("Out of memory.");
my_free(fname);
- delete ce;
DBUG_RETURN(ERROR_STOP);
}
- if (ce)
- ce->set_fname_outside_temp_buf(fname, strlen(fname));
-
if (my_write(file, (uchar*)block, block_len, MYF(MY_WME|MY_NABP)))
{
error("Failed writing to file.");
@@ -621,31 +473,11 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
/**
- Process the given Create_file_log_event.
-
- @see Load_log_processor::process_first_event(const char*,uint,const char*,uint,uint,Create_file_log_event*)
-
- @param ce Create_file_log_event to process.
-
- @retval ERROR_STOP An error occurred - the program should terminate.
- @retval OK_CONTINUE No error, the program should continue.
-*/
-Exit_status Load_log_processor::process(Create_file_log_event *ce)
-{
- const char *bname= ce->fname + dirname_length(ce->fname);
- size_t blen= ce->fname_len - (bname-ce->fname);
-
- return process_first_event(bname, blen, ce->block, ce->block_len,
- ce->file_id, ce);
-}
-
-
-/**
Process the given Begin_load_query_log_event.
@see Load_log_processor::process_first_event(const char*,uint,const char*,uint,uint,Create_file_log_event*)
- @param ce Begin_load_query_log_event to process.
+ @param blqe Begin_load_query_log_event to process.
@retval ERROR_STOP An error occurred - the program should terminate.
@retval OK_CONTINUE No error, the program should continue.
@@ -653,7 +485,7 @@ Exit_status Load_log_processor::process(Create_file_log_event *ce)
Exit_status Load_log_processor::process(Begin_load_query_log_event *blqe)
{
return process_first_event("SQL_LOAD_MB", 11, blqe->block, blqe->block_len,
- blqe->file_id, 0);
+ blqe->file_id);
}
@@ -1244,41 +1076,6 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
break;
}
- case CREATE_FILE_EVENT:
- {
- Create_file_log_event* ce= (Create_file_log_event*)ev;
- /*
- We test if this event has to be ignored. If yes, we don't save
- this event; this will have the good side-effect of ignoring all
- related Append_block and Exec_load.
- Note that Load event from 3.23 is not tested.
- */
- if (shall_skip_database(ce->db))
- goto end; // Next event
- /*
- We print the event, but with a leading '#': this is just to inform
- the user of the original command; the command we want to execute
- will be a derivation of this original command (we will change the
- filename and use LOCAL), prepared in the 'case EXEC_LOAD_EVENT'
- below.
- */
- print_skip_replication_statement(print_event_info, ev);
- if (ce->print(result_file, print_event_info, TRUE))
- goto err;
- // If this binlog is not 3.23 ; why this test??
- if (glob_description_event->binlog_version >= 3)
- {
- /*
- transfer the responsibility for destroying the event to
- load_processor
- */
- ev= NULL;
- if ((retval= load_processor.process(ce)) != OK_CONTINUE)
- goto end;
- }
- break;
- }
-
case APPEND_BLOCK_EVENT:
/*
Append_block_log_events can safely print themselves even if
@@ -1292,36 +1089,6 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
goto end;
break;
- case EXEC_LOAD_EVENT:
- {
- if (ev->print(result_file, print_event_info))
- goto err;
- Execute_load_log_event *exv= (Execute_load_log_event*)ev;
- Create_file_log_event *ce= load_processor.grab_event(exv->file_id);
- /*
- if ce is 0, it probably means that we have not seen the Create_file
- event (a bad binlog, or most probably --start-position is after the
- Create_file event). Print a warning comment.
- */
- if (ce)
- {
- bool error;
- /*
- We must not convert earlier, since the file is used by
- my_open() in Load_log_processor::append().
- */
- convert_path_to_forward_slashes((char*) ce->fname);
- error= ce->print(result_file, print_event_info, TRUE);
- my_free((void*)ce->fname);
- delete ce;
- if (error)
- goto err;
- }
- else
- warning("Ignoring Execute_load_log_event as there is no "
- "Create_file event for file_id: %u", exv->file_id);
- break;
- }
case FORMAT_DESCRIPTION_EVENT:
delete glob_description_event;
glob_description_event= (Format_description_log_event*) ev;
@@ -1578,23 +1345,14 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
destroy_evt= FALSE;
break;
}
- case PRE_GA_WRITE_ROWS_EVENT:
- case PRE_GA_DELETE_ROWS_EVENT:
- case PRE_GA_UPDATE_ROWS_EVENT:
- {
- Old_rows_log_event *e= (Old_rows_log_event*) ev;
- bool is_stmt_end= e->get_flags(Rows_log_event::STMT_END_F);
- if (print_row_event(print_event_info, ev, e->get_table_id(),
- e->get_flags(Old_rows_log_event::STMT_END_F)))
- goto err;
- DBUG_PRINT("info", ("is_stmt_end: %d", (int) is_stmt_end));
- if (!is_stmt_end && opt_flashback)
- destroy_evt= FALSE;
- break;
- }
case START_ENCRYPTION_EVENT:
glob_description_event->start_decryption((Start_encryption_log_event*)ev);
/* fall through */
+ case PRE_GA_WRITE_ROWS_EVENT:
+ case PRE_GA_DELETE_ROWS_EVENT:
+ case PRE_GA_UPDATE_ROWS_EVENT:
+ case CREATE_FILE_EVENT:
+ case EXEC_LOAD_EVENT:
default:
print_skip_replication_statement(print_event_info, ev);
if (ev->print(result_file, print_event_info))
@@ -2154,12 +1912,6 @@ static void die()
}
-static void print_version()
-{
- printf("%s Ver 3.5 for %s at %s\n", my_progname, SYSTEM_TYPE, MACHINE_TYPE);
-}
-
-
static void usage()
{
print_version();
@@ -2814,21 +2566,10 @@ static Exit_status check_master_version()
glob_description_event= NULL;
switch (version) {
- case 3:
- glob_description_event= new Format_description_log_event(1);
- break;
- case 4:
- glob_description_event= new Format_description_log_event(3);
- break;
case 5:
case 10:
- /*
- The server is soon going to send us its Format_description log
- event, unless it is a 5.0 server with 3.23 or 4.0 binlogs.
- So we first assume that this is 4.0 (which is enough to read the
- Format_desc event if one comes).
- */
- glob_description_event= new Format_description_log_event(3);
+ case 11:
+ glob_description_event= new Format_description_log_event(4);
break;
default:
error("Could not find server version: "
@@ -2887,8 +2628,6 @@ static Exit_status handle_event_text_mode(PRINT_EVENT_INFO *print_event_info,
}
Log_event_type type= ev->get_type_code();
- if (glob_description_event->binlog_version >= 3 ||
- (type != LOAD_EVENT && type != CREATE_FILE_EVENT))
{
/*
If this is a Rotate event, maybe it's the end of the requested binlog;
@@ -2947,31 +2686,6 @@ static Exit_status handle_event_text_mode(PRINT_EVENT_INFO *print_event_info,
if (retval != OK_CONTINUE)
DBUG_RETURN(retval);
}
- else
- {
- Load_log_event *le= (Load_log_event*)ev;
- const char *old_fname= le->fname;
- uint old_len= le->fname_len;
- File file;
- Exit_status retval;
- char fname[FN_REFLEN+1];
-
- if ((file= load_processor.prepare_new_file_for_old_format(le,fname)) < 0)
- {
- DBUG_RETURN(ERROR_STOP);
- }
-
- retval= process_event(print_event_info, ev, old_off, logname);
- if (retval != OK_CONTINUE)
- {
- my_close(file,MYF(MY_WME));
- DBUG_RETURN(retval);
- }
- retval= load_processor.load_old_format_file(net,old_fname,old_len,file);
- my_close(file,MYF(MY_WME));
- if (retval != OK_CONTINUE)
- DBUG_RETURN(retval);
- }
DBUG_RETURN(OK_CONTINUE);
}
@@ -3244,7 +2958,7 @@ static Exit_status check_header(IO_CACHE* file,
MY_STAT my_file_stat;
delete glob_description_event;
- if (!(glob_description_event= new Format_description_log_event(3)))
+ if (!(glob_description_event= new Format_description_log_event(4)))
{
error("Failed creating Format_description_log_event; out of memory?");
return ERROR_STOP;
@@ -3316,25 +3030,7 @@ static Exit_status check_header(IO_CACHE* file,
{
DBUG_PRINT("info",("buf[EVENT_TYPE_OFFSET=%d]=%d",
EVENT_TYPE_OFFSET, buf[EVENT_TYPE_OFFSET]));
- /* always test for a Start_v3, even if no --start-position */
- if (buf[EVENT_TYPE_OFFSET] == START_EVENT_V3)
- {
- /* This is 3.23 or 4.x */
- if (uint4korr(buf + EVENT_LEN_OFFSET) <
- (LOG_EVENT_MINIMAL_HEADER_LEN + START_V3_HEADER_LEN))
- {
- /* This is 3.23 (format 1) */
- delete glob_description_event;
- if (!(glob_description_event= new Format_description_log_event(1)))
- {
- error("Failed creating Format_description_log_event; "
- "out of memory?");
- return ERROR_STOP;
- }
- }
- break;
- }
- else if (tmp_pos >= start_position)
+ if (tmp_pos >= start_position)
break;
else if (buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT)
{
@@ -3807,7 +3503,6 @@ struct encryption_service_st encryption_handler=
#include "password.c"
#include "log_event.cc"
#include "log_event_client.cc"
-#include "log_event_old.cc"
#include "rpl_utility.cc"
#include "sql_string.cc"
#include "sql_list.cc"
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index 12804b3c667..0a5a4e1a446 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -18,7 +18,7 @@
/* By Jani Tolonen, 2001-04-20, MySQL Development Team */
-#define CHECK_VERSION "2.7.4-MariaDB"
+#define VER "2.8"
#include "client_priv.h"
#include <m_ctype.h>
@@ -228,7 +228,6 @@ static const char *load_default_groups[]=
0 };
-static void print_version(void);
static void usage(void);
static int get_options(int *argc, char ***argv);
static int process_all_databases();
@@ -248,13 +247,6 @@ static char *fix_table_name(char *dest, char *src);
int what_to_do = 0;
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, CHECK_VERSION,
- MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
-} /* print_version */
-
-
static void usage(void)
{
DBUG_ENTER("usage");
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 10f74fbb021..ebfaec5e693 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -40,7 +40,7 @@
*/
/* on merge conflict, bump to a higher version again */
-#define DUMP_VERSION "10.19"
+#define VER "10.19"
/**
First mysql version supporting sequences.
@@ -696,13 +696,6 @@ void check_io(FILE *file)
die(EX_EOF, "Got errno %d on write", errno);
}
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname_short,DUMP_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-} /* print_version */
-
-
static void short_usage_sub(FILE *f)
{
fprintf(f, "Usage: %s [OPTIONS] database [tables]\n", my_progname_short);
@@ -775,8 +768,8 @@ static void write_header(FILE *sql_file, const char *db_name)
else if (!opt_compact)
{
print_comment(sql_file, 0,
- "-- MariaDB dump %s Distrib %s, for %s (%s)\n--\n",
- DUMP_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE,
+ "-- MariaDB dump %s-%s, for %s (%s)\n--\n",
+ VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE,
MACHINE_TYPE);
print_comment(sql_file, 0, "-- Host: %s ",
fix_for_comment(current_host ? current_host : "localhost"));
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index 82dbd682bd6..6307cd0c46e 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -27,7 +27,7 @@
** * *
** *************************
*/
-#define IMPORT_VERSION "3.7"
+#define VER "3.7"
#include "client_priv.h"
#include <my_sys.h>
@@ -194,13 +194,6 @@ static const char *load_default_groups[]=
0 };
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n" ,my_progname,
- IMPORT_VERSION, MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage(void)
{
puts("Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.");
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index 6bc4e3978bb..236fa1e25fd 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -18,7 +18,7 @@
/* Show databases, tables or columns */
-#define SHOW_VERSION "9.10"
+#define VER "9.10"
#include "client_priv.h"
#include <my_sys.h>
@@ -276,13 +276,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,SHOW_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index 54df0124063..4ca69c9ed0a 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -67,7 +67,7 @@ TODO:
*/
-#define SLAP_VERSION "1.0"
+#define VER "1.0"
#define HUGE_STRING_LENGTH 8196
#define RAND_STRING_SIZE 126
@@ -720,13 +720,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname, SLAP_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 8107bd03a3b..75e9e913074 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -33,7 +33,7 @@
And many others
*/
-#define MTEST_VERSION "3.5"
+#define VER "3.5"
#include "client_priv.h"
#include <mysql_version.h>
@@ -7196,12 +7196,6 @@ static struct my_option my_long_options[] =
};
-void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,MTEST_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
void usage()
{
print_version();
@@ -8617,6 +8611,7 @@ end:
var_set_errno(mysql_stmt_errno(stmt));
+ display_optimizer_trace(cn, ds);
revert_properties();
/* Close the statement if reconnect, need new prepare */
diff --git a/cmake/configure.pl b/cmake/configure.pl
index 4085110b6fa..87099169b85 100644
--- a/cmake/configure.pl
+++ b/cmake/configure.pl
@@ -190,7 +190,7 @@ foreach my $option (@ARGV)
$cmakeargs = $cmakeargs." -DWITH_SSL=system";
next;
}
- if($option =~ /with-ssl$/)
+ if($option =~ /with-ssl$/ || $option =~ /with-ssl=bundled/)
{
$cmakeargs = $cmakeargs." -DWITH_SSL=bundled";
next;
diff --git a/cmake/cpack_rpm.cmake b/cmake/cpack_rpm.cmake
index d02f2952cfe..434ae1bb495 100644
--- a/cmake/cpack_rpm.cmake
+++ b/cmake/cpack_rpm.cmake
@@ -10,28 +10,27 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7)
SET(CPACK_RPM_COMPONENT_INSTALL ON)
SET(CPACK_COMPONENT_SERVER_GROUP "server")
-SET(CPACK_COMPONENT_MANPAGESSERVER_GROUP "server")
SET(CPACK_COMPONENT_INIFILES_GROUP "server")
SET(CPACK_COMPONENT_SERVER_SCRIPTS_GROUP "server")
SET(CPACK_COMPONENT_SUPPORTFILES_GROUP "server")
SET(CPACK_COMPONENT_DEVELOPMENT_GROUP "devel")
+SET(CPACK_COMPONENT_DEVELOPMENTSYMLINKS_GROUP "devel")
SET(CPACK_COMPONENT_MANPAGESDEVELOPMENT_GROUP "devel")
SET(CPACK_COMPONENT_TEST_GROUP "test")
-SET(CPACK_COMPONENT_MANPAGESTEST_GROUP "test")
+SET(CPACK_COMPONENT_TESTSYMLINKS_GROUP "test")
SET(CPACK_COMPONENT_CLIENT_GROUP "client")
-SET(CPACK_COMPONENT_MANPAGESCLIENT_GROUP "client")
SET(CPACK_COMPONENT_README_GROUP "server")
SET(CPACK_COMPONENT_SHAREDLIBRARIES_GROUP "shared")
SET(CPACK_COMPONENT_COMMON_GROUP "common")
SET(CPACK_COMPONENT_CLIENTPLUGINS_GROUP "common")
SET(CPACK_COMPONENT_COMPAT_GROUP "compat")
SET(CPACK_COMPONENT_BACKUP_GROUP "backup")
+SET(CPACK_COMPONENT_BACKUPSYMLINKS_GROUP "backup")
-SET(CPACK_COMPONENTS_ALL Server ManPagesServer IniFiles Server_Scripts
- SupportFiles Development ManPagesDevelopment
- ManPagesTest Readme ManPagesClient Test
- Common Client SharedLibraries ClientPlugins
- backup
+SET(CPACK_COMPONENTS_ALL Server IniFiles Server_Scripts SupportFiles
+ Development ManPagesDevelopment Readme Test Common
+ Client SharedLibraries ClientPlugins Backup
+ TestSymlinks BackupSymlinks DevelopmentSymlinks
)
SET(CPACK_RPM_PACKAGE_NAME ${CPACK_PACKAGE_NAME})
@@ -180,6 +179,24 @@ MACRO(SETA var)
ENDFOREACH()
ENDMACRO(SETA)
+FOREACH(SYM_COMPONENT Server Client)
+ STRING(TOLOWER ${SYM_COMPONENT}-compat SYM)
+ SET(SYMCOMP ${SYM_COMPONENT}Symlinks)
+ STRING(TOUPPER ${SYMCOMP} SYMCOMP_UPPER)
+ SET(CPACK_COMPONENT_${SYMCOMP_UPPER}_GROUP "${SYM}")
+ SET(CPACK_COMPONENTS_ALL "${CPACK_COMPONENTS_ALL}" "${SYMCOMP}")
+ SET(CPACK_RPM_${SYM}_PACKAGE_SUMMARY "MySQL compatible symlinks for MariaDB database ${SYM_COMPONENT} binaries/scripts")
+ SET(CPACK_RPM_${SYM}_PACKAGE_DESCRIPTION "${CPACK_RPM_PACKAGE_DESCRIPTION}")
+ SET(CPACK_RPM_${SYM}_PACKAGE_ARCHITECTURE "noarch")
+ SET(CPACK_RPM_${SYM}_USER_FILELIST ${ignored})
+ STRING(TOLOWER ${SYM_COMPONENT} SYM_COMPONENT_LOWER)
+ SET(CPACK_RPM_${SYM}_PACKAGE_REQUIRES "MariaDB-${SYM_COMPONENT_LOWER} >= 11.0.0")
+ SETA(CPACK_RPM_${SYM_COMPONENT_LOWER}_PACKAGE_RECOMMENDS "MariaDB-${SYM}")
+ENDFOREACH()
+
+SETA(CPACK_RPM_client_symlinks_PACKAGE_CONFLICTS
+ "MariaDB-server < 11.0.0")
+
SETA(CPACK_RPM_client_PACKAGE_OBSOLETES
"mysql-client"
"MySQL-client"
@@ -189,7 +206,9 @@ SETA(CPACK_RPM_client_PACKAGE_PROVIDES
"mysql-client"
"mytop")
SETA(CPACK_RPM_client_PACKAGE_CONFLICTS
- "MariaDB-server < 10.6.0")
+ "MariaDB-server < 11.0.0")
+SETA(CPACK_RPM_client_PACKAGE_REQUIRES
+ "MariaDB-common")
SETA(CPACK_RPM_common_PACKAGE_CONFLICTS
"MariaDB-server < 10.6.1")
@@ -221,7 +240,7 @@ SETA(CPACK_RPM_test_PACKAGE_PROVIDES
SETA(CPACK_RPM_server_PACKAGE_REQUIRES
"MariaDB-common >= 10.6.1"
- "MariaDB-client >= 10.6.1")
+ "MariaDB-client >= 11.0.0")
IF(WITH_WSREP)
SETA(CPACK_RPM_server_PACKAGE_REQUIRES
diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake
index f14c0145bc8..fab3305b3ff 100644
--- a/cmake/install_macros.cmake
+++ b/cmake/install_macros.cmake
@@ -51,43 +51,24 @@ FUNCTION (INSTALL_DEBUG_SYMBOLS)
ENDIF()
ENDFUNCTION()
-# Installs manpage for given file (either script or executable)
-#
-FUNCTION(INSTALL_MANPAGE file)
- IF(NOT UNIX)
- RETURN()
- ENDIF()
- GET_FILENAME_COMPONENT(file_name "${file}" NAME)
- SET(GLOB_EXPR
- ${CMAKE_SOURCE_DIR}/man/*${file}man.1*
- ${CMAKE_SOURCE_DIR}/man/*${file}man.8*
- ${CMAKE_BINARY_DIR}/man/*${file}man.1*
- ${CMAKE_BINARY_DIR}/man/*${file}man.8*
- )
- IF(MYSQL_DOC_DIR)
- SET(GLOB_EXPR
- ${MYSQL_DOC_DIR}/man/*${file}man.1*
- ${MYSQL_DOC_DIR}/man/*${file}man.8*
- ${MYSQL_DOC_DIR}/man/*${file}.1*
- ${MYSQL_DOC_DIR}/man/*${file}.8*
- ${GLOB_EXPR}
- )
- ENDIF()
-
- FILE(GLOB_RECURSE MANPAGES ${GLOB_EXPR})
-
- IF(MANPAGES)
- LIST(GET MANPAGES 0 MANPAGE)
- STRING(REPLACE "${file}man.1" "${file}.1" MANPAGE "${MANPAGE}")
- STRING(REPLACE "${file}man.8" "${file}.8" MANPAGE "${MANPAGE}")
- IF(MANPAGE MATCHES "${file}.1")
- SET(SECTION man1)
- ELSE()
- SET(SECTION man8)
+FUNCTION(INSTALL_MANPAGES COMP)
+ FOREACH(f ${ARGN})
+ STRING(REGEX REPLACE "^.*\\.([1-8])$" "\\1" n ${f})
+ IF(NOT ${n})
+ MESSAGE(FATAL_ERROR "Wrong filename in INSTALL_MANPAGE(${f})")
ENDIF()
- INSTALL(FILES "${MANPAGE}" DESTINATION "${INSTALL_MANDIR}/${SECTION}"
- COMPONENT ManPages)
- ENDIF()
+ INSTALL(FILES ${f} DESTINATION ${INSTALL_MANDIR}/man${n} COMPONENT ${COMP})
+
+ STRING(REGEX REPLACE "\\.${n}$" "" f ${f})
+ LIST(FIND MARIADB_SYMLINK_FROMS ${f} i)
+ IF(i GREATER -1)
+ LIST(GET MARIADB_SYMLINK_TOS ${i} s)
+ SET(dst "${CMAKE_CURRENT_BINARY_DIR}/${s}.${n}")
+ FILE(WRITE ${dst} ".so man${n}/${f}.${n}")
+ INSTALL(FILES ${dst} DESTINATION ${INSTALL_MANDIR}/man${n}
+ COMPONENT ${COMP}Symlinks)
+ ENDIF()
+ ENDFOREACH()
ENDFUNCTION()
FUNCTION(INSTALL_SCRIPT)
@@ -109,8 +90,6 @@ FUNCTION(INSTALL_SCRIPT)
ENDIF()
INSTALL(PROGRAMS ${script} DESTINATION ${ARG_DESTINATION} COMPONENT ${COMP})
- get_filename_component(dest "${script}" NAME)
- INSTALL_MANPAGE(${dest})
ENDFUNCTION()
@@ -246,10 +225,6 @@ FUNCTION(MYSQL_INSTALL_TARGETS)
IF(SIGNCODE)
SIGN_TARGET(${target} ${COMP})
ENDIF()
- # Install man pages on Unix
- IF(UNIX)
- INSTALL_MANPAGE($<TARGET_FILE:${target}>)
- ENDIF()
ENDFOREACH()
INSTALL(TARGETS ${TARGETS} DESTINATION ${ARG_DESTINATION} ${COMP})
diff --git a/cmake/mysql_add_executable.cmake b/cmake/mysql_add_executable.cmake
index 06bdfce14de..f9faabab9cd 100644
--- a/cmake/mysql_add_executable.cmake
+++ b/cmake/mysql_add_executable.cmake
@@ -106,7 +106,7 @@ FUNCTION (MYSQL_ADD_EXECUTABLE)
${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${link}
DESTINATION
${ARG_DESTINATION}
- COMPONENT ${COMP})
+ COMPONENT ${COMP}Symlinks)
ELSE()
# Windows note:
# Here, hardlinks are used, because cmake can't install symlinks.
diff --git a/cmake/plugin.cmake b/cmake/plugin.cmake
index 813d8ef6e42..d3f214c6146 100644
--- a/cmake/plugin.cmake
+++ b/cmake/plugin.cmake
@@ -250,7 +250,10 @@ MACRO(MYSQL_ADD_PLUGIN)
ELSE()
SET(ver "")
ENDIF()
- SET(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} ${ARG_COMPONENT})
+ STRING(TOUPPER ${ARG_COMPONENT} ARG_COMPONENT_UPPER)
+ SET(CPACK_COMPONENT_${ARG_COMPONENT_UPPER}SYMLINKS_GROUP ${ARG_COMPONENT} PARENT_SCOPE)
+ SET(CPACK_COMPONENT_${ARG_COMPONENT_UPPER}_GROUP ${ARG_COMPONENT} PARENT_SCOPE)
+ SET(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} ${ARG_COMPONENT} ${ARG_COMPONENT}Symlinks)
SET(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} PARENT_SCOPE)
IF (NOT ARG_CLIENT)
diff --git a/debian/additions/innotop/innotop b/debian/additions/innotop/innotop
index 2bc090917fe..d47b122f29c 100644
--- a/debian/additions/innotop/innotop
+++ b/debian/additions/innotop/innotop
@@ -466,7 +466,7 @@ sub parse_status_text {
# too many locks to print, the output might be truncated)
my $time_text;
- if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^10\.[0-9]\./) ) {
+ if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^(10|11)\.[0-9]\./) ) {
( $time_text ) = $fulltext =~ m/^([0-9-]* [0-9:]*) [0-9a-fx]* INNODB MONITOR OUTPUT/m;
$innodb_data{'ts'} = [ parse_innodb_timestamp_56( $time_text ) ];
} else {
@@ -634,7 +634,7 @@ sub parse_fk_section {
return 0 unless $fulltext;
my ( $ts, $type );
- if ( ($mysqlversion =~ /^5.[67]\./) || ($mysqlversion =~ /^10.[0-9]\./) ) {
+ if ( ($mysqlversion =~ /^5.[67]\./) || ($mysqlversion =~ /^(10|11).[0-9]\./) ) {
( $ts, $type ) = $fulltext =~ m/^([0-9-]* [0-9:]*)\s[0-9a-fx]*\s+(\w+)/m;
$section->{'ts'} = [ parse_innodb_timestamp_56( $ts ) ];
} else {
@@ -894,7 +894,7 @@ sub parse_dl_section {
my ( $ts ) = $fulltext =~ m/^$s$/m;
return 0 unless $ts;
- if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^10\.[0-9]\./) ) {
+ if ( ($mysqlversion =~ /^5\.[67]\./) || ($mysqlversion =~ /^(10|11)\.[0-9]\./) ) {
$dl->{'ts'} = [ parse_innodb_timestamp_56( $ts ) ];
}
else {
diff --git a/debian/additions/mariadb.conf.d/50-mariadb-clients.cnf b/debian/additions/mariadb.conf.d/50-mariadb-clients.cnf
new file mode 100644
index 00000000000..3c4fad877e4
--- /dev/null
+++ b/debian/additions/mariadb.conf.d/50-mariadb-clients.cnf
@@ -0,0 +1,22 @@
+#
+# These groups are read by MariaDB command-line tools
+# Use it for options that affect only one utility
+#
+
+[mariadb-client]
+
+[mariadb-upgrade]
+
+[mariadb-admin]
+
+[mariadb-binlog]
+
+[mariadb-check]
+
+[mariadb-dump]
+
+[mariadb-import]
+
+[mariadb-show]
+
+[mariadb-slap]
diff --git a/debian/additions/mariadb.conf.d/50-mysqld_safe.cnf b/debian/additions/mariadb.conf.d/50-mariadb_safe.cnf
index e24f96a9e65..4467b087d9d 100644
--- a/debian/additions/mariadb.conf.d/50-mysqld_safe.cnf
+++ b/debian/additions/mariadb.conf.d/50-mariadb_safe.cnf
@@ -4,7 +4,7 @@
# For similar behavior, systemd users should create the following file:
# /etc/systemd/system/mariadb.service.d/migrated-from-my.cnf-settings.conf
#
-# To achieve the same result as the default 50-mysqld_safe.cnf, please create
+# To achieve the same result as the default 50-mariadb_safe.cnf, please create
# /etc/systemd/system/mariadb.service.d/migrated-from-my.cnf-settings.conf
# with the following contents:
#
@@ -14,11 +14,11 @@
# StandardError = syslog
# SyslogFacility = daemon
# SyslogLevel = err
-# SyslogIdentifier = mysqld
+# SyslogIdentifier = mariadbd
#
# For more information, please read https://mariadb.com/kb/en/mariadb/systemd/
-[mysqld_safe]
+[mariadbd-safe]
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
diff --git a/debian/additions/mariadb.conf.d/50-mysql-clients.cnf b/debian/additions/mariadb.conf.d/50-mysql-clients.cnf
deleted file mode 100644
index 2f5a3605409..00000000000
--- a/debian/additions/mariadb.conf.d/50-mysql-clients.cnf
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# These groups are read by MariaDB command-line tools
-# Use it for options that affect only one utility
-#
-
-[mysql]
-
-[mysql_upgrade]
-
-[mysqladmin]
-
-[mysqlbinlog]
-
-[mysqlcheck]
-
-[mysqldump]
-
-[mysqlimport]
-
-[mysqlshow]
-
-[mysqlslap]
diff --git a/debian/additions/mariadb.conf.d/50-server.cnf b/debian/additions/mariadb.conf.d/50-server.cnf
index 5e958e9e697..6dcfe16cd55 100644
--- a/debian/additions/mariadb.conf.d/50-server.cnf
+++ b/debian/additions/mariadb.conf.d/50-server.cnf
@@ -5,8 +5,8 @@
# this is read by the standalone daemon and embedded servers
[server]
-# this is only for the mysqld standalone daemon
-[mysqld]
+# this is only for the mariadbd daemon
+[mariadbd]
#
# * Basic Settings
@@ -52,7 +52,7 @@ bind-address = 127.0.0.1
# When running under systemd, error logging goes via stdout/stderr to journald
# and when running legacy init error logging goes to syslog due to
-# /etc/mysql/conf.d/mariadb.conf.d/50-mysqld_safe.cnf
+# /etc/mysql/conf.d/mariadb.conf.d/50-mariadb_safe.cnf
# Enable this if you want to have error logging into a separate file
#log_error = /var/log/mysql/error.log
# Enable the slow query log to see queries with especially long duration
@@ -63,8 +63,8 @@ bind-address = 127.0.0.1
#log_slow_min_examined_row_limit = 1000
# The following can be used as easy to replay backup logs or for replication.
-# note: if you are setting up a replication slave, see README.Debian about
-# other settings you may need to change.
+# note: if you are setting up a replica, see README.Debian about other
+# settings you may need to change.
#server-id = 1
#log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
@@ -106,9 +106,9 @@ collation-server = utf8mb4_general_ci
# This group is only read by MariaDB servers, not by MySQL.
# If you use the same .cnf file for MySQL and MariaDB,
# you can put MariaDB-only options here
-[mariadb]
+[mariadbd]
-# This group is only read by MariaDB-10.11 servers.
+# This group is only read by MariaDB-11.0 servers.
# If you use the same .cnf file for MariaDB of different versions,
# use this group for options that older servers don't understand
-[mariadb-10.11]
+[mariadb-11.0]
diff --git a/debian/changelog b/debian/changelog
index af6536d9be9..60ef886f6dd 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,4 @@
-mariadb (1:10.11.0) unstable; urgency=medium
+mariadb (1:11.0.0) unstable; urgency=medium
* Initial Release
diff --git a/debian/control b/debian/control
index 91766d64b3e..ae4b5e3ba95 100644
--- a/debian/control
+++ b/debian/control
@@ -544,7 +544,8 @@ Provides: default-mysql-client,
virtual-mysql-client
Recommends: libdbd-mariadb-perl | libdbd-mysql-perl,
libdbi-perl,
- libterm-readkey-perl
+ libterm-readkey-perl,
+ mariadb-client-compat
Description: MariaDB database client binaries
MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
server. SQL (Structured Query Language) is the most popular database query
@@ -554,6 +555,81 @@ Description: MariaDB database client binaries
This package includes the client binaries and the additional tools
innotop and mariadb-report (mysqlreport).
+Package: mariadb-client-compat
+Architecture: all
+Depends: mariadb-client (>= ${source:Version})
+Multi-Arch: foreign
+Description: MySQL compatibility links to mariadb-client binaries/scripts.
+Conflicts: mariadb-client (< 11.0.0),
+ mariadb-client-10.0,
+ mariadb-client-10.1,
+ mariadb-client-10.2,
+ mariadb-client-10.3,
+ mariadb-client-10.4,
+ mariadb-client-10.5,
+ mariadb-client-10.6,
+ mariadb-client-10.7,
+ mariadb-client-10.8,
+ mariadb-client-5.1,
+ mariadb-client-5.2,
+ mariadb-client-5.3,
+ mariadb-client-5.5,
+ mariadb-client-core (< 11.0.0),
+ mariadb-client-core-10.0,
+ mariadb-client-core-10.1,
+ mariadb-client-core-10.2,
+ mariadb-client-core-10.3,
+ mariadb-client-core-10.4,
+ mariadb-client-core-10.5,
+ mariadb-client-core-10.6,
+ mariadb-client-core-10.7,
+ mariadb-client-core-10.8,
+ mariadb-client-core-5.1,
+ mariadb-client-core-5.2,
+ mariadb-client-core-5.3,
+ mariadb-client-core-5.5,
+ mysql-client (<< 5.0.51),
+ mysql-client-5.0,
+ mysql-client-5.1,
+ mysql-client-5.5,
+ mysql-client-5.6,
+ mysql-client-5.7,
+ mysql-client-8.0,
+ mysql-client-core-5.0,
+ mysql-client-core-5.1,
+ mysql-client-core-5.5,
+ mysql-client-core-5.6,
+ mysql-client-core-5.7,
+ mysql-client-core-8.0,
+ mariadb-server (< 11.0.0),
+ mariadb-server-10.0,
+ mariadb-server-10.1,
+ mariadb-server-10.2,
+ mariadb-server-10.3,
+ mariadb-server-10.4,
+ mariadb-server-10.5,
+ mariadb-server-10.6,
+ mariadb-server-10.7,
+ mariadb-server-10.8,
+ mariadb-server-core (< 11.0.0),
+ mariadb-server-core-10.0,
+ mariadb-server-core-10.1,
+ mariadb-server-core-10.2,
+ mariadb-server-core-10.3,
+ mariadb-server-core-10.4,
+ mariadb-server-core-10.5,
+ mariadb-server-core-10.6,
+ mariadb-server-core-10.7,
+ mariadb-server-core-10.8,
+ mysql-server-core-8.0,
+ mysql-server-5.7,
+ percona-server-server-5.6,
+ percona-server-server,
+ percona-xtradb-cluster-server-5.6,
+ percona-xtradb-cluster-server-5.7,
+ percona-xtradb-cluster-server
+
Package: mariadb-server-core
Architecture: any
Depends: mariadb-common (>= ${source:Version}),
@@ -668,7 +744,8 @@ Suggests: mailx,
mariadb-test,
netcat-openbsd
Recommends: libhtml-template-perl,
- pv
+ pv,
+ mariadb-server-compat
Pre-Depends: adduser (>= 3.40),
debconf,
mariadb-common (>= ${source:Version})
@@ -772,6 +849,53 @@ Description: MariaDB database server binaries
.
This package includes the server binaries.
+Package: mariadb-server-compat
+Architecture: all
+Depends: mariadb-server (>= ${source:Version}),
+Multi-Arch: foreign
+Description: MySQL compatibility links to mariadb-server binaries/scripts.
+Conflicts: mariadb-server-core (< 11.0.0),
+ mariadb-server-core-10.0,
+ mariadb-server-core-10.1,
+ mariadb-server-core-10.2,
+ mariadb-server-core-10.3,
+ mariadb-server-core-10.4,
+ mariadb-server-core-10.5,
+ mariadb-server-core-10.6,
+ mariadb-server-core-10.7,
+ mariadb-server-core-10.8,
+ mariadb-server-core-5.1,
+ mariadb-server-core-5.2,
+ mariadb-server-core-5.3,
+ mariadb-server-core-5.5,
+ mariadb-server (< 11.0.0),
+ mariadb-server-10.0,
+ mariadb-server-10.1,
+ mariadb-server-10.2,
+ mariadb-server-10.3,
+ mariadb-server-10.4,
+ mariadb-server-10.5,
+ mariadb-server-10.6,
+ mariadb-server-10.7,
+ mariadb-server-10.8,
+ mariadb-server-5.1,
+ mariadb-server-5.2,
+ mariadb-server-5.3,
+ mariadb-server-5.5,
+ mysql-server-5.0,
+ mysql-server-core-5.0,
+ mysql-server-core-5.1,
+ mysql-server-core-5.5,
+ mysql-server-core-5.6,
+ mysql-server-core-5.7,
+ mysql-server-core-8.0,
+ percona-server-server-5.6,
+ percona-server-server-5.7,
+ percona-server-server,
+ percona-xtradb-cluster-server-5.6,
+ percona-xtradb-cluster-server-5.7,
+ percona-xtradb-cluster-server
+
Package: mariadb-backup
Architecture: any
Breaks: mariadb-backup-10.1,
diff --git a/debian/mariadb-client-compat.install b/debian/mariadb-client-compat.install
new file mode 100644
index 00000000000..4800fc3a720
--- /dev/null
+++ b/debian/mariadb-client-compat.install
@@ -0,0 +1,38 @@
+usr/bin/mysql
+usr/bin/mysqlbinlog
+usr/bin/mysql_convert_table_format
+usr/bin/mysql_find_rows
+usr/bin/mysql_fix_extensions
+usr/bin/mysql_plugin
+usr/bin/mysql_setpermission
+usr/bin/mysql_tzinfo_to_sql
+usr/bin/mysql_waitpid
+usr/bin/mysqlaccess
+usr/bin/mysqladmin
+usr/bin/mysqlcheck
+usr/bin/mysqldump
+usr/bin/mysqldumpslow
+usr/bin/mysqlhotcopy
+usr/bin/mysqlimport
+usr/bin/mysql_secure_installation
+usr/bin/mysqlshow
+usr/bin/mysqlslap
+usr/share/man/man1/mysql.1
+usr/share/man/man1/mysql_convert_table_format.1
+usr/share/man/man1/mysql_find_rows.1
+usr/share/man/man1/mysql_fix_extensions.1
+usr/share/man/man1/mysql_plugin.1
+usr/share/man/man1/mysql_secure_installation.1
+usr/share/man/man1/mysql_setpermission.1
+usr/share/man/man1/mysql_tzinfo_to_sql.1
+usr/share/man/man1/mysql_waitpid.1
+usr/share/man/man1/mysqlaccess.1
+usr/share/man/man1/mysqladmin.1
+usr/share/man/man1/mysqlbinlog.1
+usr/share/man/man1/mysqldump.1
+usr/share/man/man1/mysqldumpslow.1
+usr/share/man/man1/mysqlhotcopy.1
+usr/share/man/man1/mysqlimport.1
+usr/share/man/man1/mysqlshow.1
+usr/share/man/man1/mysqlslap.1
+usr/share/man/man1/mysqlcheck.1
diff --git a/debian/mariadb-client-compat.links b/debian/mariadb-client-compat.links
new file mode 100644
index 00000000000..a661ee26104
--- /dev/null
+++ b/debian/mariadb-client-compat.links
@@ -0,0 +1,9 @@
+usr/bin/mariadb-check usr/bin/mysqlanalyze
+usr/bin/mariadb-check usr/bin/mysqlcheck
+usr/bin/mariadb-check usr/bin/mysqloptimize
+usr/bin/mariadb-check usr/bin/mysqlrepair
+usr/bin/mariadb-report usr/bin/mysqlreport
+usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mysqlanalyze.1.gz
+usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mysqloptimize.1.gz
+usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mysqlrepair.1.gz
+usr/share/man/man1/mariadb-report.1.gz usr/share/man/man1/mysqlreport.1.gz
diff --git a/debian/mariadb-client-core.install b/debian/mariadb-client-core.install
index bcc3515a02f..ddb8015e1cb 100644
--- a/debian/mariadb-client-core.install
+++ b/debian/mariadb-client-core.install
@@ -1,9 +1,6 @@
usr/bin/mariadb
usr/bin/mariadb-check
usr/bin/my_print_defaults
-usr/bin/mysql
usr/share/man/man1/mariadb-check.1
usr/share/man/man1/mariadb.1
usr/share/man/man1/my_print_defaults.1
-usr/share/man/man1/mysql.1
-usr/share/man/man1/mysqlcheck.1
diff --git a/debian/mariadb-client.install b/debian/mariadb-client.install
index fba8d093810..efc56b33018 100644
--- a/debian/mariadb-client.install
+++ b/debian/mariadb-client.install
@@ -1,7 +1,7 @@
debian/additions/innotop/innotop usr/bin/
debian/additions/mariadb-report usr/bin/
debian/additions/mariadb.conf.d/50-client.cnf etc/mysql/mariadb.conf.d
-debian/additions/mariadb.conf.d/50-mysql-clients.cnf etc/mysql/mariadb.conf.d
+debian/additions/mariadb.conf.d/50-mariadb-clients.cnf etc/mysql/mariadb.conf.d
debian/additions/mariadb.conf.d/60-galera.cnf etc/mysql/mariadb.conf.d
usr/bin/mariadb-access
usr/bin/mariadb-admin
@@ -22,17 +22,6 @@ usr/bin/mariadb-slap
usr/bin/mariadb-tzinfo-to-sql
usr/bin/mariadb-waitpid
usr/bin/msql2mysql
-usr/bin/mysql_find_rows
-usr/bin/mysql_fix_extensions
-usr/bin/mysql_waitpid
-usr/bin/mysqlaccess
-usr/bin/mysqladmin
-usr/bin/mysqlcheck
-usr/bin/mysqldump
-usr/bin/mysqldumpslow
-usr/bin/mysqlimport
-usr/bin/mysqlshow
-usr/bin/mysqlslap
usr/bin/mytop
usr/bin/perror
usr/bin/replace
@@ -56,23 +45,6 @@ usr/share/man/man1/mariadb-slap.1
usr/share/man/man1/mariadb-tzinfo-to-sql.1
usr/share/man/man1/mariadb-waitpid.1
usr/share/man/man1/msql2mysql.1
-usr/share/man/man1/mysql_convert_table_format.1
-usr/share/man/man1/mysql_find_rows.1
-usr/share/man/man1/mysql_fix_extensions.1
-usr/share/man/man1/mysql_plugin.1
-usr/share/man/man1/mysql_secure_installation.1
-usr/share/man/man1/mysql_setpermission.1
-usr/share/man/man1/mysql_tzinfo_to_sql.1
-usr/share/man/man1/mysql_waitpid.1
-usr/share/man/man1/mysqlaccess.1
-usr/share/man/man1/mysqladmin.1
-usr/share/man/man1/mysqlbinlog.1
-usr/share/man/man1/mysqldump.1
-usr/share/man/man1/mysqldumpslow.1
-usr/share/man/man1/mysqlhotcopy.1
-usr/share/man/man1/mysqlimport.1
-usr/share/man/man1/mysqlshow.1
-usr/share/man/man1/mysqlslap.1
usr/share/man/man1/mytop.1
usr/share/man/man1/perror.1
usr/share/man/man1/replace.1
diff --git a/debian/mariadb-client.links b/debian/mariadb-client.links
index 62e3651daf5..c65cb3d42cd 100644
--- a/debian/mariadb-client.links
+++ b/debian/mariadb-client.links
@@ -2,16 +2,7 @@ usr/bin/mariadb-check usr/bin/mariadb-analyze
usr/bin/mariadb-check usr/bin/mariadb-optimize
usr/bin/mariadb-check usr/bin/mariadb-repair
usr/bin/mariadb-check usr/bin/mariadbcheck
-usr/bin/mariadb-check usr/bin/mysqlanalyze
-usr/bin/mariadb-check usr/bin/mysqlcheck
-usr/bin/mariadb-check usr/bin/mysqloptimize
-usr/bin/mariadb-check usr/bin/mysqlrepair
-usr/bin/mariadb-report usr/bin/mysqlreport
usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mariadb-analyze.1.gz
usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mariadb-optimize.1.gz
usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mariadb-repair.1.gz
usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mariadbcheck.1.gz
-usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mysqlanalyze.1.gz
-usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mysqloptimize.1.gz
-usr/share/man/man1/mariadb-check.1.gz usr/share/man/man1/mysqlrepair.1.gz
-usr/share/man/man1/mariadb-report.1.gz usr/share/man/man1/mysqlreport.1.gz
diff --git a/debian/mariadb-server-compat.install b/debian/mariadb-server-compat.install
new file mode 100644
index 00000000000..36701d8e0c6
--- /dev/null
+++ b/debian/mariadb-server-compat.install
@@ -0,0 +1,12 @@
+usr/bin/mysqld_multi
+usr/bin/mysqld_safe
+usr/bin/mysqld_safe_helper
+usr/bin/mysql_install_db
+usr/bin/mysql_upgrade
+usr/sbin/mysqld
+usr/share/man/man1/mysqld_multi.1
+usr/share/man/man1/mysqld_safe.1
+usr/share/man/man1/mysqld_safe_helper.1
+usr/share/man/man1/mysql_install_db.1
+usr/share/man/man1/mysql_upgrade.1
+usr/share/man/man8/mysqld.8
diff --git a/debian/mariadb-server-core.install b/debian/mariadb-server-core.install
index 26870f3f422..d61c42c77c4 100644
--- a/debian/mariadb-server-core.install
+++ b/debian/mariadb-server-core.install
@@ -1,19 +1,13 @@
usr/bin/innochecksum
usr/bin/mariadb-install-db
usr/bin/mariadb-upgrade
-usr/bin/mysql_install_db
-usr/bin/mysql_upgrade
usr/bin/resolveip
usr/sbin/mariadbd
-usr/sbin/mysqld
usr/share/man/man1/innochecksum.1
usr/share/man/man1/mariadb-install-db.1
usr/share/man/man1/mariadb-upgrade.1
-usr/share/man/man1/mysql_install_db.1
-usr/share/man/man1/mysql_upgrade.1
usr/share/man/man1/resolveip.1
usr/share/man/man8/mariadbd.8
-usr/share/man/man8/mysqld.8
usr/share/mysql/bulgarian
usr/share/mysql/charsets
usr/share/mysql/chinese
diff --git a/debian/mariadb-server.README.Debian b/debian/mariadb-server.README.Debian
index 6042249a706..7f229a149f8 100644
--- a/debian/mariadb-server.README.Debian
+++ b/debian/mariadb-server.README.Debian
@@ -1,5 +1,5 @@
-* MYSQL WON'T START OR STOP?
-============================
+* MARIADB WON'T START OR STOP?
+==============================
The most common reasons the server does not start are:
- AppArmor is enforced and something is wrong with the confinement profile.
@@ -10,12 +10,12 @@ First check the contents of syslog (or systemd journal) and then check the
logs at /var/log/mysql/ for any hints of what might be wrong.
Examples:
- grep mysql /var/log/syslog
+ grep mariadbd /var/log/syslog
journalctl -u mariadb
-* NEW SERVICE NAME, PROCESS AND BINARY NAMES IN MARIADB 10.5
-============================================================
+* NEW SERVICE NAME, PROCESS AND BINARY NAMES SINCE MARIADB 10.5
+===============================================================
Starting form MariaDB 10.5, the default SysV init service name is 'mariadb',
and can be accessed at path /etc/init.d/mariadb. The alias 'mysql' is only
@@ -38,7 +38,7 @@ https://mariadb.com/kb/en/mariadb/systemd/
* MIXING PACKAGES FROM MARIADB.ORG AND OFFICIAL DEBIAN REPOSITORIES
-==================================================================
+===================================================================
Please note that the MariaDB packaging in official Debian repositories are of
a completely new generation compared to the legacy packaging used in MariaDB.org
@@ -57,10 +57,10 @@ revision string.
On new installs no root password is set and no debian-sys-maint user is
created anymore. Instead the MariaDB root account is set to be authenticated
-using the Unix socket, e.g. any mysqld invocation by root or via sudo will
-let the user see the mysqld prompt.
+using the Unix socket, e.g. any mariadb invocation by root or via sudo will
+let the user see the MariaDB prompt.
-You may never ever delete the mysql user "root". Although it has no password
+You may never ever delete the MariaDB user "root". Although it has no password
is set, the unix_auth plugin ensure that it can only be run locally as the root
user.
@@ -84,14 +84,14 @@ MariaDB in Debian is secure by default, because:
- There is no root account with password anymore. The system admin needs to
create one themselves if they need it. With no password, all issues related
to password management and password leaking are gone. Sysadmins can access
- the database without a password simply by running 'sudo mysql' thanks to
+ the database without a password simply by running 'sudo mariadb' thanks to
socket based authentication, which detects the system root user and allows
- them to use the mysqld console as the mysql root user. For details see
+ them to use the mariadb console as the MariaDB root user. For details see
https://www.slideshare.net/ottokekalainen/less-passwords-more-security-unix-socket-authentication-and-other-mariadb-hardening-tips
- There is no test database nor test accounts in the out-of-the-box Debian
installation.
-Therefore there is also no need to run the 'mysql_secure_installation'. In fact
+Therefore there is also no need to run the 'mariadb-secure-installation'. In fact
that script will try to do things that are already prevented, and might fail.
@@ -102,18 +102,18 @@ The privilege tables are automatically updated so all there is left is read
the release notes on https://mariadb.com/kb/en/release-notes/ to see if any
changes affect custom apps.
-There should not be any need to run 'mysql_upgrade' manually, as the upgrade
+There should not be any need to run 'mariadb-upgrade' manually, as the upgrade
scripts do that automatically.
* WHAT TO DO AFTER INSTALLATION
===============================
-The MySQL manual describes certain steps to do at this stage in a separate
+The MariaDB manual describes certain steps to do at this stage in a separate
chapter. They are not necessary as the Debian packages does them
automatically.
-There should not be any need to run 'mysql_install_db' manually, as the install
+There should not be any need to run 'mariadb-install-db' manually, as the install
scripts do that automatically.
The only thing that is left over for the admin is
@@ -125,7 +125,7 @@ The only thing that is left over for the admin is
============
For security reasons, the Debian package has enabled networking only on the
-loop-back device using "bind-address" in /etc/mysql/my.cnf. Check with
+loop-back device using "bind-address" in /etc/mysql/mariadb.cnf. Check with
"netstat -tlnp" where it is listening. If your connection is aborted
immediately check your firewall rules or network routes.
@@ -145,16 +145,16 @@ If your local Unix account is the one you want to have local super user
access on your database with you can create the following account that will
only work for the local Unix user connecting to the database locally.
- sudo /usr/bin/mysql -e "GRANT ALL ON *.* TO '$USER'@'localhost' IDENTIFIED VIA unix_socket WITH GRANT OPTION"
+ sudo /usr/bin/mariadb -e "GRANT ALL ON *.* TO '$USER'@'localhost' IDENTIFIED VIA unix_socket WITH GRANT OPTION"
To create a local machine account username=USERNAME with a password:
- sudo /usr/bin/mysql -e "GRANT ALL ON *.* TO 'USERNAME'@'localhost' IDENTIFIED BY 'password' WITH GRANT OPTION"
+ sudo /usr/bin/mariadb -e "GRANT ALL ON *.* TO 'USERNAME'@'localhost' IDENTIFIED BY 'password' WITH GRANT OPTION"
To create a USERNAME user with password 'password' admin user that can access
the DB server over the network:
- sudo /usr/bin/mysql -e "GRANT ALL ON *.* TO 'USERNAME'@'%' IDENTIFIED BY 'password' WITH GRANT OPTION"
+ sudo /usr/bin/mariadb -e "GRANT ALL ON *.* TO 'USERNAME'@'%' IDENTIFIED BY 'password' WITH GRANT OPTION"
Scripts should run as a user who have the required grants and be identified via unix_socket.
@@ -175,13 +175,12 @@ https://mariadb.com/kb/en/configuring-mariadb-with-mycnf/.
* FURTHER NOTES ON REPLICATION
==============================
-If the MySQL server is acting as a replication slave, you should not
-set --tmpdir to point to a directory on a memory-based file system or to
-a directory that is cleared when the server host restarts. A replication
-slave needs some of its temporary files to survive a machine restart so
-that it can replicate temporary tables or LOAD DATA INFILE operations. If
-files in the temporary file directory are lost when the server restarts,
-replication fails.
+If the MariaDB server is acting as a replica, you should not set --tmpdir to
+point to a directory on a memory-based file system or to a directory that is
+cleared when the server host restarts. A replica needs some of its temporary
+files to survive a machine restart so that it can replicate temporary tables
+or LOAD DATA INFILE operations. If files in the temporary file directory are
+lost when the server restarts, replication fails.
* DOWNGRADING
@@ -193,7 +192,7 @@ You might get lucky downgrading a few minor versions without issued. Take a
backup first. If you break it you get to keep both pieces. Do a restore from
backup or upgrade to the previous version.
-If doing a major version downgrade, take a mysqldump/maria-backup consistent
+If doing a major version downgrade, take a mariadb-dump/mariadb-backup consistent
backup using the current version and reload after downgrading and purging
existing databases.
diff --git a/debian/mariadb-server.install b/debian/mariadb-server.install
index 2350196e9d3..4a849bc2a46 100644
--- a/debian/mariadb-server.install
+++ b/debian/mariadb-server.install
@@ -1,7 +1,7 @@
debian/additions/debian-start etc/mysql
debian/additions/debian-start.inc.sh usr/share/mysql
debian/additions/echo_stderr usr/share/mysql
-debian/additions/mariadb.conf.d/50-mysqld_safe.cnf etc/mysql/mariadb.conf.d
+debian/additions/mariadb.conf.d/50-mariadb_safe.cnf etc/mysql/mariadb.conf.d
debian/additions/mariadb.conf.d/50-server.cnf etc/mysql/mariadb.conf.d
debian/additions/source_mariadb.py usr/share/apport/package-hooks
etc/apparmor.d/usr.sbin.mariadbd
@@ -27,16 +27,6 @@ usr/bin/myisam_ftdump
usr/bin/myisamchk
usr/bin/myisamlog
usr/bin/myisampack
-usr/bin/mysql_convert_table_format
-usr/bin/mysql_plugin
-usr/bin/mysql_secure_installation
-usr/bin/mysql_setpermission
-usr/bin/mysql_tzinfo_to_sql
-usr/bin/mysqlbinlog
-usr/bin/mysqld_multi
-usr/bin/mysqld_safe
-usr/bin/mysqld_safe_helper
-usr/bin/mysqlhotcopy
usr/bin/wsrep_sst_common
usr/bin/wsrep_sst_mariabackup
usr/bin/wsrep_sst_mysqldump
@@ -80,9 +70,6 @@ usr/share/man/man1/myisam_ftdump.1
usr/share/man/man1/myisamchk.1
usr/share/man/man1/myisamlog.1
usr/share/man/man1/myisampack.1
-usr/share/man/man1/mysqld_multi.1
-usr/share/man/man1/mysqld_safe.1
-usr/share/man/man1/mysqld_safe_helper.1
usr/share/man/man1/wsrep_sst_common.1
usr/share/man/man1/wsrep_sst_mariabackup.1
usr/share/man/man1/wsrep_sst_mysqldump.1
diff --git a/debian/source/lintian-overrides b/debian/source/lintian-overrides
index 306ab27271d..7686591f0a8 100644
--- a/debian/source/lintian-overrides
+++ b/debian/source/lintian-overrides
@@ -7,22 +7,6 @@ version-substvar-for-external-package libmariadb-dev -> libmysqld-dev
version-substvar-for-external-package Replaces ${source:Version} libmariadb-dev -> libmysqlclient-dev [debian/control:74]
version-substvar-for-external-package Replaces ${source:Version} libmariadb-dev -> libmysqld-dev [debian/control:74]
version-substvar-for-external-package libmariadbd-dev -> libmariadbclient-dev
-version-substvar-for-external-package Replaces ${source:Version} libmariadbd-dev -> libmariadbclient-dev [debian/control:232]
-version-substvar-for-external-package Conflicts (line 408) ${source:Version} mariadb-client -> mariadb-client-10.11
-version-substvar-for-external-package Conflicts (line 575) ${source:Version} mariadb-server-core -> mariadb-server-core-10.11
-version-substvar-for-external-package Conflicts (line 711) ${source:Version} mariadb-server -> mariadb-server-10.11
-version-substvar-for-external-package Conflicts (line 95) ${source:Version} libmariadb-dev-compat -> libmariadbclient-dev
-version-substvar-for-external-package Replaces (line 109) ${source:Version} libmariadb-dev-compat -> libmariadbclient-dev
-version-substvar-for-external-package Replaces (line 330) ${source:Version} mariadb-client-core -> mariadb-client-10.11
-version-substvar-for-external-package Replaces (line 330) ${source:Version} mariadb-client-core -> mariadb-server-core-10.11
-version-substvar-for-external-package Replaces (line 481) ${source:Version} mariadb-client -> mariadb-client-10.11
-version-substvar-for-external-package Replaces (line 481) ${source:Version} mariadb-client -> mariadb-client-core-10.11
-version-substvar-for-external-package Replaces (line 481) ${source:Version} mariadb-client -> mariadb-server-10.11
-version-substvar-for-external-package Replaces (line 481) ${source:Version} mariadb-client -> mariadb-server-core-10.11
-version-substvar-for-external-package Replaces (line 626) ${source:Version} mariadb-server-core -> mariadb-client-10.11
-version-substvar-for-external-package Replaces (line 626) ${source:Version} mariadb-server-core -> mariadb-server-10.11
-version-substvar-for-external-package Replaces (line 748) ${source:Version} mariadb-server -> mariadb-client-10.11
-version-substvar-for-external-package Replaces (line 748) ${source:Version} mariadb-server -> mariadb-server-10.11
# ColumnStore not used in Debian, safe to ignore. Reported upstream in https://jira.mariadb.org/browse/MDEV-24124
source-is-missing storage/columnstore/columnstore/utils/jemalloc/libjemalloc.so.2
source-is-missing [storage/columnstore/columnstore/utils/jemalloc/libjemalloc.so.2]
diff --git a/extra/CMakeLists.txt b/extra/CMakeLists.txt
index 5021128ed35..34a83d19e32 100644
--- a/extra/CMakeLists.txt
+++ b/extra/CMakeLists.txt
@@ -91,7 +91,7 @@ MYSQL_ADD_EXECUTABLE(replace replace.c COMPONENT Client)
TARGET_LINK_LIBRARIES(replace mysys)
IF(UNIX)
- MYSQL_ADD_EXECUTABLE(resolve_stack_dump resolve_stack_dump.c)
+ MYSQL_ADD_EXECUTABLE(resolve_stack_dump resolve_stack_dump.c COMPONENT Client)
TARGET_LINK_LIBRARIES(resolve_stack_dump mysys)
MYSQL_ADD_EXECUTABLE(mariadb-waitpid mysql_waitpid.c COMPONENT Client)
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index cc8f8d14094..eaaeb0c36a5 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -26,6 +26,8 @@
Published with a permission.
*/
+#define VER "1.0"
+
#include <my_global.h>
#include <stdio.h>
#include <stdlib.h>
@@ -1205,20 +1207,6 @@ static struct my_option innochecksum_options[] = {
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
-/** Print out the version and build information. */
-static void print_version()
-{
-#ifdef DBUG_OFF
- printf("%s Ver %s, for %s (%s)\n",
- my_progname, PACKAGE_VERSION,
- SYSTEM_TYPE, MACHINE_TYPE);
-#else
- printf("%s-debug Ver %s, for %s (%s)\n",
- my_progname, PACKAGE_VERSION,
- SYSTEM_TYPE, MACHINE_TYPE);
-#endif /* DBUG_OFF */
-}
-
static void usage(void)
{
print_version();
diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt
index 66293dac31b..1ee2261f878 100644
--- a/extra/mariabackup/CMakeLists.txt
+++ b/extra/mariabackup/CMakeLists.txt
@@ -76,7 +76,7 @@ MYSQL_ADD_EXECUTABLE(mariadb-backup
${PROJECT_BINARY_DIR}/sql/sql_builtin.cc
${PROJECT_SOURCE_DIR}/sql/net_serv.cc
${PROJECT_SOURCE_DIR}/libmysqld/libmysql.c
- COMPONENT backup
+ COMPONENT Backup
)
# Export all symbols on Unix, for better crash callstacks
@@ -99,7 +99,7 @@ MYSQL_ADD_EXECUTABLE(mbstream
xbstream.cc
xbstream_read.cc
xbstream_write.cc
- COMPONENT backup
+ COMPONENT Backup
)
diff --git a/extra/mariabackup/fil_cur.cc b/extra/mariabackup/fil_cur.cc
index e0a4711a2aa..2932fa6d5a6 100644
--- a/extra/mariabackup/fil_cur.cc
+++ b/extra/mariabackup/fil_cur.cc
@@ -199,12 +199,6 @@ xb_fil_cur_open(
return(XB_FIL_CUR_SKIP);
}
- if (srv_file_flush_method == SRV_O_DIRECT
- || srv_file_flush_method == SRV_O_DIRECT_NO_FSYNC) {
-
- os_file_set_nocache(cursor->file, node->name, "OPEN");
- }
-
posix_fadvise(cursor->file, 0, 0, POSIX_FADV_SEQUENTIAL);
cursor->page_size = node->space->physical_size();
diff --git a/extra/mariabackup/xbstream.cc b/extra/mariabackup/xbstream.cc
index 6306806b867..3a3ba55b8b2 100644
--- a/extra/mariabackup/xbstream.cc
+++ b/extra/mariabackup/xbstream.cc
@@ -18,6 +18,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*******************************************************/
+#define VER "1.0"
#include <my_global.h>
#include <my_base.h>
#include <my_getopt.h>
@@ -26,8 +27,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
#include "common.h"
#include "xbstream.h"
#include "datasink.h"
+#include <welcome_copyright_notice.h>
-#define XBSTREAM_VERSION "1.0"
#define XBSTREAM_BUFFER_SIZE (10 * 1024 * 1024UL)
#define START_FILE_HASH_SIZE 16
@@ -148,14 +149,6 @@ get_options(int *argc, char ***argv)
static
void
-print_version(void)
-{
- printf("%s Ver %s for %s (%s)\n", my_progname, XBSTREAM_VERSION,
- SYSTEM_TYPE, MACHINE_TYPE);
-}
-
-static
-void
usage(void)
{
print_version();
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index be5bffe8920..05168d35a21 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -311,6 +311,8 @@ extern const char *innodb_checksum_algorithm_names[];
extern TYPELIB innodb_checksum_algorithm_typelib;
extern const char *innodb_flush_method_names[];
extern TYPELIB innodb_flush_method_typelib;
+/** Ignored option */
+static ulong innodb_flush_method;
static const char *binlog_info_values[] = {"off", "lockless", "on", "auto",
NullS};
@@ -1032,6 +1034,8 @@ enum options_xtrabackup
#if defined __linux__ || defined _WIN32
OPT_INNODB_LOG_FILE_BUFFERING,
#endif
+ OPT_INNODB_DATA_FILE_BUFFERING,
+ OPT_INNODB_DATA_FILE_WRITE_THROUGH,
OPT_INNODB_LOG_FILE_SIZE,
OPT_INNODB_OPEN_FILES,
OPT_XTRA_DEBUG_SYNC,
@@ -1583,10 +1587,10 @@ struct my_option xb_server_options[] =
FALSE, 0, 0, 0, 0, 0},
{"innodb_flush_method", OPT_INNODB_FLUSH_METHOD,
- "With which method to flush data.",
- &srv_file_flush_method, &srv_file_flush_method,
+ "Ignored parameter with no effect",
+ &innodb_flush_method, &innodb_flush_method,
&innodb_flush_method_typelib, GET_ENUM, REQUIRED_ARG,
- IF_WIN(SRV_ALL_O_DIRECT_FSYNC, SRV_O_DIRECT), 0, 0, 0, 0, 0},
+ 4/* O_DIRECT */, 0, 0, 0, 0, 0},
{"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
"Redo log buffer size in bytes.",
@@ -1600,6 +1604,16 @@ struct my_option xb_server_options[] =
(G_PTR*) &log_sys.log_buffered, 0, GET_BOOL, NO_ARG,
TRUE, 0, 0, 0, 0, 0},
#endif
+ {"innodb_data_file_buffering", OPT_INNODB_DATA_FILE_BUFFERING,
+ "Whether the file system cache for data files is enabled during --backup",
+ (G_PTR*) &fil_system.buffered,
+ (G_PTR*) &fil_system.buffered, 0, GET_BOOL, NO_ARG,
+ FALSE, 0, 0, 0, 0, 0},
+ {"innodb_data_file_write_through", OPT_INNODB_DATA_FILE_WRITE_THROUGH,
+ "Whether each write to data files writes through",
+ (G_PTR*) &fil_system.write_through,
+ (G_PTR*) &fil_system.write_through, 0, GET_BOOL, NO_ARG,
+ FALSE, 0, 0, 0, 0, 0},
{"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
"Ignored for mysqld option compatibility",
(G_PTR*) &srv_log_file_size, (G_PTR*) &srv_log_file_size, 0,
@@ -1653,7 +1667,7 @@ struct my_option xb_server_options[] =
{"innodb_undo_tablespaces", OPT_INNODB_UNDO_TABLESPACES,
"Number of undo tablespaces to use.",
(G_PTR*)&srv_undo_tablespaces, (G_PTR*)&srv_undo_tablespaces,
- 0, GET_UINT, REQUIRED_ARG, 0, 0, 126, 0, 1, 0},
+ 0, GET_UINT, REQUIRED_ARG, 3, 0, 126, 0, 1, 0},
{"innodb_compression_level", OPT_INNODB_COMPRESSION_LEVEL,
"Compression level used for zlib compression.",
@@ -1917,23 +1931,6 @@ xb_get_one_option(const struct my_option *opt,
ADD_PRINT_PARAM_OPT(srv_log_group_home_dir);
break;
- case OPT_INNODB_FLUSH_METHOD:
-#ifdef _WIN32
- /* From: storage/innobase/handler/ha_innodb.cc:innodb_init_params */
- switch (srv_file_flush_method) {
- case SRV_ALL_O_DIRECT_FSYNC + 1 /* "async_unbuffered"="unbuffered" */:
- srv_file_flush_method= SRV_ALL_O_DIRECT_FSYNC;
- break;
- case SRV_ALL_O_DIRECT_FSYNC + 2 /* "normal"="fsync" */:
- srv_file_flush_method= SRV_FSYNC;
- break;
- }
-#endif
- ut_a(srv_file_flush_method
- <= IF_WIN(SRV_ALL_O_DIRECT_FSYNC, SRV_O_DIRECT_NO_FSYNC));
- ADD_PRINT_PARAM_OPT(innodb_flush_method_names[srv_file_flush_method]);
- break;
-
case OPT_INNODB_PAGE_SIZE:
ADD_PRINT_PARAM_OPT(innobase_page_size);
@@ -2172,12 +2169,6 @@ static bool innodb_init_param()
srv_print_verbose_log = verbose ? 2 : 1;
- /* Store the default charset-collation number of this MySQL
- installation */
-
- /* We cannot treat characterset here for now!! */
- data_mysql_default_charset_coll = (ulint)default_charset_info->number;
-
ut_ad(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number);
#ifdef _WIN32
diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c
index 115eb8e7084..6fc9f3f4d9b 100644
--- a/extra/my_print_defaults.c
+++ b/extra/my_print_defaults.c
@@ -21,13 +21,14 @@
**
** Written by Monty
*/
-
+#define VER "1.7"
#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>
#include <my_getopt.h>
#include <my_default.h>
#include <mysql_version.h>
+#include <welcome_copyright_notice.h>
#define load_default_groups mysqld_groups
#include <mysqld_default_groups.h>
@@ -70,16 +71,10 @@ static void cleanup_and_exit(int exit_code)
exit(exit_code);
}
-static void version()
-{
- printf("%s Ver 1.8 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
-}
-
-
static void usage() __attribute__ ((noreturn));
static void usage()
{
- version();
+ print_version();
puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,\nand you are welcome to modify and redistribute it under the GPL license\n");
puts("Displays the options from option groups of option files, which is useful to see which options a particular tool will use");
printf("Usage: %s [OPTIONS] [groups]\n", my_progname);
@@ -106,7 +101,7 @@ get_one_option(const struct my_option *opt __attribute__((unused)),
verbose++;
break;
case 'V':
- version();
+ print_version();
/* fall through */
case '#':
DBUG_PUSH(argument ? argument : default_dbug_option);
diff --git a/extra/mysql_waitpid.c b/extra/mysql_waitpid.c
index 8f2a5c99395..7409e8f1c69 100644
--- a/extra/mysql_waitpid.c
+++ b/extra/mysql_waitpid.c
@@ -24,7 +24,6 @@
#include <errno.h>
static const char *VER= "1.1";
-static char *progname;
static my_bool verbose;
void usage(void);
@@ -50,7 +49,7 @@ get_one_option(const struct my_option *opt,
{
switch(opt->id) {
case 'V':
- printf("%s version %s by Jani Tolonen\n", progname, VER);
+ printf("%s version %s by Jani Tolonen\n", my_progname, VER);
exit(0);
case 'I':
case '?':
@@ -65,7 +64,7 @@ int main(int argc, char *argv[])
{
int pid= 0, t= 0, sig= 0;
- progname= argv[0];
+ MY_INIT(argv[0]);
if (handle_options(&argc, &argv, my_long_options, get_one_option))
exit(-1);
@@ -96,8 +95,8 @@ int main(int argc, char *argv[])
void usage(void)
{
- printf("%s version %s by Jani Tolonen\n\n", progname, VER);
- printf("usage: %s [options] #pid #time\n\n", progname);
+ printf("%s version %s by Jani Tolonen\n\n", my_progname, VER);
+ printf("usage: %s [options] #pid #time\n\n", my_progname);
printf("Description: Waits for a program, which program id is #pid, to\n");
printf("terminate within #time seconds. If the program terminates within\n");
printf("this time, or if the #pid no longer exists, value 0 is returned.\n");
diff --git a/extra/perror.c b/extra/perror.c
index ee6e362e06b..c9e9ae6e334 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -16,7 +16,7 @@
/* Return error-text for system error messages and handler messages */
-#define PERROR_VERSION "2.11"
+#define VER "2.11"
#include <my_global.h>
#include <my_sys.h>
@@ -76,13 +76,6 @@ static HA_ERRORS ha_errlist[]=
};
-static void print_version(void)
-{
- printf("%s Ver %s, for %s (%s)\n",my_progname,PERROR_VERSION,
- SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/extra/replace.c b/extra/replace.c
index 8b20f812be0..81c7f484bdf 100644
--- a/extra/replace.c
+++ b/extra/replace.c
@@ -41,12 +41,13 @@
Written by Monty.
fill_buffer_retaining() is taken from gnu-grep and modified.
*/
-
+#define VER "1.4"
#include <my_global.h>
#include <m_ctype.h>
#include <my_sys.h>
#include <m_string.h>
#include <errno.h>
+#include <welcome_copyright_notice.h>
#define PC_MALLOC 256 /* Bytes for pointers */
#define PS_MALLOC 512 /* Bytes for data */
@@ -176,8 +177,7 @@ static int static_get_options(int *argc, char***argv)
case 'I':
case '?':
help=1; /* Help text written */
- printf("%s Ver 1.4 for %s at %s\n",my_progname,SYSTEM_TYPE,
- MACHINE_TYPE);
+ print_version();
if (version)
break;
puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,\nand you are welcome to modify and redistribute it under the GPL license\n");
diff --git a/extra/resolve_stack_dump.c b/extra/resolve_stack_dump.c
index fe2f297fd33..2b09f5bf1fb 100644
--- a/extra/resolve_stack_dump.c
+++ b/extra/resolve_stack_dump.c
@@ -18,6 +18,7 @@
versions into symbolic names. By Sasha Pachev <sasha@mysql.com>
*/
+#define VER "1.4"
#include <my_global.h>
#include <m_ctype.h>
#include <my_sys.h>
@@ -25,11 +26,11 @@
#include <mysql_version.h>
#include <errno.h>
#include <my_getopt.h>
+#include <welcome_copyright_notice.h>
#define INIT_SYM_TABLE 4096
#define INC_SYM_TABLE 4096
#define MAX_SYM_SIZE 128
-#define DUMP_VERSION "1.4"
#define HEX_INVALID (uchar)255
typedef ulong my_long_addr_t ; /* at some point, we need to fix configure
@@ -65,13 +66,6 @@ static struct my_option my_long_options[] =
static void verify_sort();
static void clean_up();
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,DUMP_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage()
{
print_version();
diff --git a/extra/resolveip.c b/extra/resolveip.c
index 890912d9850..d964802d3b3 100644
--- a/extra/resolveip.c
+++ b/extra/resolveip.c
@@ -16,7 +16,7 @@
/* Resolves IP's to hostname and hostnames to IP's */
-#define RESOLVE_VERSION "2.3"
+#define VER "2.3"
#include <my_global.h>
#include <m_ctype.h>
@@ -31,6 +31,7 @@
#endif
#include <my_net.h>
#include <my_getopt.h>
+#include <welcome_copyright_notice.h>
#if !defined(_AIX) && !defined(h_errno)
extern int h_errno;
@@ -52,13 +53,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver %s, for %s (%s)\n",my_progname,RESOLVE_VERSION,
- SYSTEM_TYPE,MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/include/my_compare.h b/include/my_compare.h
index c2cb9ae46b9..62bb6ac0ed4 100644
--- a/include/my_compare.h
+++ b/include/my_compare.h
@@ -154,6 +154,5 @@ typedef enum check_result {
typedef check_result_t (*index_cond_func_t)(void *param);
typedef check_result_t (*rowid_filter_func_t)(void *param);
-typedef int (*rowid_filter_is_active_func_t)(void *param);
#endif /* _my_compare_h */
diff --git a/include/my_getopt.h b/include/my_getopt.h
index ffff706e015..26f21bd632e 100644
--- a/include/my_getopt.h
+++ b/include/my_getopt.h
@@ -100,7 +100,6 @@ typedef my_bool (*my_get_one_option)(const struct my_option *, const char *, con
typedef void *(*my_getopt_value)(const char *, uint, const struct my_option *,
int *);
-
extern char *disabled_my_option;
extern char *autoset_my_option;
extern my_bool my_getopt_print_errors;
diff --git a/include/my_global.h b/include/my_global.h
index 54f76bf5d91..952e65c9728 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -676,6 +676,7 @@ typedef SOCKET_SIZE_TYPE size_socket;
Io buffer size; Must be a power of 2 and a multiple of 512. May be
smaller what the disk page size. This influences the speed of the
isam btree library. eg to big to slow.
+ 4096 is a common block size on SSDs.
*/
#define IO_SIZE 4096U
/*
diff --git a/include/my_sys.h b/include/my_sys.h
index 41bb7f8575d..3180a8c37da 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -341,6 +341,14 @@ typedef struct st_dynamic_array
myf malloc_flags;
} DYNAMIC_ARRAY;
+
+typedef struct st_dynamic_array_append
+{
+ DYNAMIC_ARRAY *array;
+ uchar *pos, *end;
+} DYNAMIC_ARRAY_APPEND;
+
+
typedef struct st_my_tmpdir
{
DYNAMIC_ARRAY full_list;
@@ -770,7 +778,7 @@ extern int flush_write_cache(RECORD_CACHE *info);
extern void handle_recived_signals(void);
extern sig_handler my_set_alarm_variable(int signo);
-extern my_bool radixsort_is_appliccable(uint n_items, size_t size_of_element);
+extern my_bool radixsort_is_applicable(uint n_items, size_t size_of_element);
extern void my_string_ptr_sort(uchar *base,uint items,size_t size);
extern void radixsort_for_str_ptr(uchar* base[], uint number_of_elements,
size_t size_of_element,uchar *buffer[]);
@@ -844,6 +852,10 @@ extern void freeze_size(DYNAMIC_ARRAY *array);
#define push_dynamic(A,B) insert_dynamic((A),(B))
#define reset_dynamic(array) ((array)->elements= 0)
#define sort_dynamic(A,cmp) my_qsort((A)->buffer, (A)->elements, (A)->size_of_element, (cmp))
+extern void init_append_dynamic(DYNAMIC_ARRAY_APPEND *append,
+ DYNAMIC_ARRAY *array);
+extern my_bool append_dynamic(DYNAMIC_ARRAY_APPEND *append,
+ const void * element);
extern my_bool init_dynamic_string(DYNAMIC_STRING *str, const char *init_str,
size_t init_alloc,size_t alloc_increment);
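The my_sys.h hunk adds a small append cursor over DYNAMIC_ARRAY. Only the declarations are
visible here, so the following is a hedged sketch of the intended call sequence; the array
initialization and the loop are illustrative assumptions, not part of the patch:

  DYNAMIC_ARRAY arr;            /* assumed already initialized for sizeof(int) elements */
  DYNAMIC_ARRAY_APPEND app;
  int i;

  init_append_dynamic(&app, &arr);     /* point the write cursor at the array */
  for (i= 0; i < 1000; i++)
    if (append_dynamic(&app, &i))      /* non-zero return treated as allocation failure */
      break;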
diff --git a/include/my_tracker.h b/include/my_tracker.h
new file mode 100644
index 00000000000..88cefe5ef5d
--- /dev/null
+++ b/include/my_tracker.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2022, MariaDB Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+/*
+ Trivial framework to add a tracker to a C function
+*/
+
+#include "my_rdtsc.h"
+
+struct my_time_tracker
+{
+ ulonglong counter;
+ ulonglong cycles;
+};
+
+#ifdef HAVE_TIME_TRACKING
+#define START_TRACKING ulonglong my_start_time= my_timer_cycles()
+#define END_TRACKING(var) \
+ { \
+ ulonglong my_end_time= my_timer_cycles(); \
+ (var)->counter++; \
+ (var)->cycles+= (unlikely(my_end_time < my_start_time) ? \
+ my_end_time - my_start_time + ULONGLONG_MAX : \
+ my_end_time - my_start_time); \
+ }
+#else
+#define START_TRACKING
+#define END_TRACKING(var) do { } while(0)
+#endif
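The new my_tracker.h is used by wrapping the code to be measured between the two macros;
the counters are only compiled in when the build defines HAVE_TIME_TRACKING. A hedged
usage sketch (the function and counter names below are hypothetical):

  #include "my_tracker.h"      /* assumes my_global.h has been included first (ulonglong) */

  static struct my_time_tracker key_read_tracker;   /* hypothetical per-call-site counter */

  static void timed_key_read(void)
  {
    START_TRACKING;                    /* samples my_timer_cycles() when tracking is enabled */
    /* ... the code being measured ... */
    END_TRACKING(&key_read_tracker);   /* accumulates elapsed cycles and bumps the counter */
  }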
diff --git a/include/myisam.h b/include/myisam.h
index c90026bfc7a..dd4f9084b00 100644
--- a/include/myisam.h
+++ b/include/myisam.h
@@ -435,6 +435,8 @@ int thr_write_keys(MI_SORT_PARAM *sort_param);
int sort_write_record(MI_SORT_PARAM *sort_param);
int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, ulonglong);
my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows);
+struct OPTIMIZER_COSTS;
+void myisam_update_optimizer_costs(struct OPTIMIZER_COSTS *costs);
#ifdef __cplusplus
}
diff --git a/include/mysql_com.h b/include/mysql_com.h
index a2befe2d324..b0e96caddf7 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -41,27 +41,6 @@
#define MYSQL50_TABLE_NAME_PREFIX_LENGTH (sizeof(MYSQL50_TABLE_NAME_PREFIX)-1)
#define SAFE_NAME_LEN (NAME_LEN + MYSQL50_TABLE_NAME_PREFIX_LENGTH)
-/*
- MDEV-4088
-
- MySQL (and MariaDB 5.x before the fix) was using the first character of the
- server version string (as sent in the first handshake protocol packet) to
- decide on the replication event formats. And for 10.x the first character
- is "1", which the slave thought comes from some ancient 1.x version
- (ignoring the fact that the first ever MySQL version was 3.x).
-
- To support replication to these old clients, we fake the version in the
- first handshake protocol packet to start from "5.5.5-" (for example,
- it might be "5.5.5-10.0.1-MariaDB-debug-log".
-
- On the client side we remove this fake version prefix to restore the
- correct server version. The version "5.5.5" did not support
- pluggable authentication, so any version starting from "5.5.5-" and
- claiming to support pluggable auth, must be using this fake prefix.
-*/
-/* this version must be the one that *does not* support pluggable auth */
-#define RPL_VERSION_HACK "5.5.5-"
-
#define SERVER_VERSION_LENGTH 60
#define SQLSTATE_LENGTH 5
#define LIST_PROCESS_HOST_LEN 64
diff --git a/include/mysys_err.h b/include/mysys_err.h
index e0e97d0284a..d115b5ddb70 100644
--- a/include/mysys_err.h
+++ b/include/mysys_err.h
@@ -73,7 +73,8 @@ extern const char *globerrs[]; /* my_error_messages is here */
#define EE_PERM_LOCK_MEMORY 37
#define EE_MEMCNTL 38
#define EE_DUPLICATE_CHARSET 39
-#define EE_ERROR_LAST 39 /* Copy last error nr */
+#define EE_NAME_DEPRECATED 40
+#define EE_ERROR_LAST 40 /* Copy last error nr */
/* Add error numbers before EE_ERROR_LAST and change it accordingly. */
diff --git a/include/welcome_copyright_notice.h b/include/welcome_copyright_notice.h
index 22d8d204268..e50e9a02d66 100644
--- a/include/welcome_copyright_notice.h
+++ b/include/welcome_copyright_notice.h
@@ -27,4 +27,12 @@
"Copyright (c) " first_year ", " COPYRIGHT_NOTICE_CURRENT_YEAR \
", Oracle, MariaDB Corporation Ab and others.\n"
+#ifdef VER
+static inline void print_version()
+{
+ /* NOTE mysql.cc is not using this function! */
+ printf("%s from %s, client %s for %s (%s)\n",
+ my_progname, MYSQL_SERVER_VERSION, VER, SYSTEM_TYPE, MACHINE_TYPE);
+}
+#endif
#endif /* _welcome_copyright_notice_h_ */
diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt
index e6d7ab32e88..ae308a00392 100644
--- a/man/CMakeLists.txt
+++ b/man/CMakeLists.txt
@@ -13,85 +13,31 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
-SET(MAN1_WSREP wsrep_sst_rsync.1 wsrep_sst_common.1 wsrep_sst_mariabackup.1
- wsrep_sst_mysqldump.1 wsrep_sst_rsync_wan.1 galera_recovery.1 galera_new_cluster.1)
-SET(MAN1_SERVER innochecksum.1 myisam_ftdump.1 myisamchk.1
- aria_chk.1 aria_dump_log.1 aria_ftdump.1 aria_pack.1 aria_read_log.1
- aria_s3_copy.1
- myisamlog.1 myisampack.1 mysql.server.1 mariadb-conv.1
- mysql_fix_extensions.1
- mysql_install_db.1
- mysql_upgrade.1
- mysqld_multi.1 mysqld_safe.1
- resolveip.1 mariadb-service-convert.1
- mysqld_safe_helper.1
- mysql_ldb.1 myrocks_hotbackup.1
- mbstream.1 mariabackup.1)
-IF(WITH_WSREP)
- SET(MAN1_SERVER ${MAN1_SERVER} ${MAN1_WSREP})
-ENDIF()
-SET(MAN8_SERVER mysqld.8)
-SET(MAN1_CLIENT msql2mysql.1 mysql.1 mysql_find_rows.1 mysql_waitpid.1
- mysqldumpslow.1
- mysqlaccess.1 mysqladmin.1 mysqlbinlog.1 mysqlcheck.1
- mysqldump.1 mysqlimport.1 mysqlshow.1 mysqlslap.1 mytop.1
- mysql_plugin.1 mysql_embedded.1 my_print_defaults.1
- mysqlhotcopy.1 mysql_setpermission.1 mysql_tzinfo_to_sql.1
- mysql_convert_table_format.1 replace.1 mysql_secure_installation.1
- perror.1 resolve_stack_dump.1)
-SET(MAN1_DEVEL mysql_config.1)
-SET(MAN1_TEST mysql-stress-test.pl.1 mysql-test-run.pl.1 mysql_client_test.1
- mysqltest.1 mysqltest_embedded.1 mysql_client_test_embedded.1
- my_safe_process.1)
IF(NOT WITHOUT_SERVER)
- INSTALL(FILES ${MAN1_SERVER} DESTINATION ${INSTALL_MANDIR}/man1 COMPONENT ManPagesServer)
- INSTALL(FILES ${MAN8_SERVER} DESTINATION ${INSTALL_MANDIR}/man8 COMPONENT ManPagesServer)
+ INSTALL_MANPAGES(Server
+ innochecksum.1 myisam_ftdump.1 myisamchk.1 aria_chk.1 aria_dump_log.1
+ aria_ftdump.1 aria_pack.1 aria_read_log.1 myisamlog.1
+ myisampack.1 mysql.server.1 mariadb-conv.1 mariadb-fix-extensions.1
+ mariadb-install-db.1 mariadb-upgrade.1 mariadbd-multi.1 mariadbd-safe.1
+ resolveip.1 mariadb-service-convert.1 mariadbd-safe-helper.1 mariadbd.8)
+ INSTALL_MANPAGES(Backup mbstream.1 mariadb-backup.1)
+ IF(WITH_WSREP)
+ INSTALL_MANPAGES(Server
+ wsrep_sst_rsync.1 wsrep_sst_common.1 wsrep_sst_mariabackup.1
+ wsrep_sst_mysqldump.1 wsrep_sst_rsync_wan.1 galera_recovery.1
+ galera_new_cluster.1)
+ ENDIF()
ENDIF()
-
-INSTALL(FILES ${MAN1_CLIENT} DESTINATION ${INSTALL_MANDIR}/man1 COMPONENT ManPagesClient)
-INSTALL(FILES ${MAN1_DEVEL} DESTINATION ${INSTALL_MANDIR}/man1 COMPONENT ManPagesDevelopment)
-INSTALL(FILES ${MAN1_TEST} DESTINATION ${INSTALL_MANDIR}/man1 COMPONENT ManPagesTest)
-
-macro(MARIADB_SYMLINK_MANPAGE)
- list(LENGTH MARIADB_SYMLINK_TOS _len)
- math(EXPR _listlen "${_len}-1")
-
- foreach(_index RANGE ${_listlen})
- list(GET MARIADB_SYMLINK_TOS ${_index} _name)
-
- set(_manname "${_name}")
- list(FIND MAN1_SERVER ${_manname}.1 _iman1server)
- list(FIND MAN8_SERVER ${_manname}.8 _iman8server)
- list(FIND MAN1_CLIENT ${_manname}.1 _iman1client)
- list(FIND MAN1_DEVEL ${_manname}.1 _iman1devel)
- list(FIND MAN1_TEST ${_manname}.1 _iman1test)
-
- if (${_iman1client} GREATER -1)
- MARIADB_SYMLINK_MANPAGE_CREATE(${_manname} ${_index} 1 "man1" ManPagesClient)
- endif()
- if (${_iman1devel} GREATER -1)
- MARIADB_SYMLINK_MANPAGE_CREATE(${_manname} ${_index} 1 "man1" ManPagesDevelopment)
- endif()
- if (${_iman1test} GREATER -1)
- MARIADB_SYMLINK_MANPAGE_CREATE(${_manname} ${_index} 1 "man1" ManPagesTest)
- endif()
- if (WITHOUT_SERVER)
- continue()
- endif()
- if (${_iman1server} GREATER -1)
- MARIADB_SYMLINK_MANPAGE_CREATE(${_manname} ${_index} 1 "man1" ManPagesServer)
- endif()
- if (${_iman8server} GREATER -1)
- MARIADB_SYMLINK_MANPAGE_CREATE(${_manname} ${_index} 8 "man8" ManPagesServer)
- endif()
- endforeach(_index)
-endmacro(MARIADB_SYMLINK_MANPAGE)
-
-macro(MARIADB_SYMLINK_MANPAGE_CREATE mysqlname index mannr dir comp)
- LIST(GET MARIADB_SYMLINK_FROMS ${index} _mariadbname)
- SET(dest "${CMAKE_CURRENT_BINARY_DIR}/${_mariadbname}.${mannr}")
- FILE(WRITE ${dest} ".so ${dir}/${mysqlname}.${mannr}")
- INSTALL(FILES ${dest} DESTINATION ${INSTALL_MANDIR}/${dir} COMPONENT ${comp})
-endmacro(MARIADB_SYMLINK_MANPAGE_CREATE)
-
-MARIADB_SYMLINK_MANPAGE()
+INSTALL_MANPAGES(Client
+ msql2mysql.1 mariadb.1 mariadb-find-rows.1 mariadb-waitpid.1
+ mariadb-dumpslow.1 mariadb-access.1 mariadb-admin.1 mariadb-binlog.1
+ mariadb-check.1 mariadb-dump.1 mariadb-import.1 mariadb-show.1
+ mariadb-slap.1 mytop.1 mariadb-plugin.1 mariadb-embedded.1
+ my_print_defaults.1 mariadb-hotcopy.1 mariadb-setpermission.1
+ mariadb-tzinfo-to-sql.1 mariadb-convert-table-format.1 replace.1
+ mariadb-secure-installation.1 perror.1 resolve_stack_dump.1)
+INSTALL_MANPAGES(Development mariadb_config.1)
+INSTALL_MANPAGES(Test
+ mysql-stress-test.pl.1 mysql-test-run.pl.1 mariadb-client-test.1
+ mariadb-test.1 mariadb-test-embedded.1 mariadb-client-test-embedded.1
+ my_safe_process.1)
diff --git a/man/mysqlaccess.1 b/man/mariadb-access.1
index 4d9e4902dae..4d9e4902dae 100644
--- a/man/mysqlaccess.1
+++ b/man/mariadb-access.1
diff --git a/man/mysqladmin.1 b/man/mariadb-admin.1
index da25123a219..da25123a219 100644
--- a/man/mysqladmin.1
+++ b/man/mariadb-admin.1
diff --git a/man/mariabackup.1 b/man/mariadb-backup.1
index 4ee0a2f09d4..4ee0a2f09d4 100644
--- a/man/mariabackup.1
+++ b/man/mariadb-backup.1
diff --git a/man/mysqlbinlog.1 b/man/mariadb-binlog.1
index 8dd814d34ab..8dd814d34ab 100644
--- a/man/mysqlbinlog.1
+++ b/man/mariadb-binlog.1
diff --git a/man/mysqlcheck.1 b/man/mariadb-check.1
index ae9937db4d3..ae9937db4d3 100644
--- a/man/mysqlcheck.1
+++ b/man/mariadb-check.1
diff --git a/man/mariadb-client-test-embedded.1 b/man/mariadb-client-test-embedded.1
new file mode 100644
index 00000000000..327a06c5af9
--- /dev/null
+++ b/man/mariadb-client-test-embedded.1
@@ -0,0 +1 @@
+.so man1/mariadb-client-test.1
diff --git a/man/mysql_client_test.1 b/man/mariadb-client-test.1
index 9d23903768c..9d23903768c 100644
--- a/man/mysql_client_test.1
+++ b/man/mariadb-client-test.1
diff --git a/man/mysql_convert_table_format.1 b/man/mariadb-convert-table-format.1
index e1deb7df54d..e1deb7df54d 100644
--- a/man/mysql_convert_table_format.1
+++ b/man/mariadb-convert-table-format.1
diff --git a/man/mysqldump.1 b/man/mariadb-dump.1
index 9bb6fdadca1..9bb6fdadca1 100644
--- a/man/mysqldump.1
+++ b/man/mariadb-dump.1
diff --git a/man/mysqldumpslow.1 b/man/mariadb-dumpslow.1
index 4f82544dc14..4f82544dc14 100644
--- a/man/mysqldumpslow.1
+++ b/man/mariadb-dumpslow.1
diff --git a/man/mariadb-embedded.1 b/man/mariadb-embedded.1
new file mode 100644
index 00000000000..78fc18b313f
--- /dev/null
+++ b/man/mariadb-embedded.1
@@ -0,0 +1 @@
+.so man1/mariadb.1
diff --git a/man/mysql_find_rows.1 b/man/mariadb-find-rows.1
index f0220250bf7..f0220250bf7 100644
--- a/man/mysql_find_rows.1
+++ b/man/mariadb-find-rows.1
diff --git a/man/mysql_fix_extensions.1 b/man/mariadb-fix-extensions.1
index f043a6e4298..b7bec412b36 100644
--- a/man/mysql_fix_extensions.1
+++ b/man/mariadb-fix-extensions.1
@@ -19,6 +19,8 @@ mariadb-fix-extensions \- normalize table file name extensions (mysql_fix_extens
\fBmysql_fix_extensions \fR\fB\fIdata_dir\fR\fR
.SH "DESCRIPTION"
.PP
+This script is deprecated and will be removed in a later release.
+.PP
\fBmysql_fix_extensions\fR
converts the extensions for
MyISAM
diff --git a/man/mysqlhotcopy.1 b/man/mariadb-hotcopy.1
index 9e545c784c0..9e545c784c0 100644
--- a/man/mysqlhotcopy.1
+++ b/man/mariadb-hotcopy.1
diff --git a/man/mysqlimport.1 b/man/mariadb-import.1
index bc84ed54179..bc84ed54179 100644
--- a/man/mysqlimport.1
+++ b/man/mariadb-import.1
diff --git a/man/mysql_install_db.1 b/man/mariadb-install-db.1
index 1046a77c7e0..1046a77c7e0 100644
--- a/man/mysql_install_db.1
+++ b/man/mariadb-install-db.1
diff --git a/man/mysql_plugin.1 b/man/mariadb-plugin.1
index d01fd26634c..d01fd26634c 100644
--- a/man/mysql_plugin.1
+++ b/man/mariadb-plugin.1
diff --git a/man/mysql_secure_installation.1 b/man/mariadb-secure-installation.1
index 9c788238d9c..9c788238d9c 100644
--- a/man/mysql_secure_installation.1
+++ b/man/mariadb-secure-installation.1
diff --git a/man/mysql_setpermission.1 b/man/mariadb-setpermission.1
index 1510981ddeb..1510981ddeb 100644
--- a/man/mysql_setpermission.1
+++ b/man/mariadb-setpermission.1
diff --git a/man/mysqlshow.1 b/man/mariadb-show.1
index dd0f97ed407..dd0f97ed407 100644
--- a/man/mysqlshow.1
+++ b/man/mariadb-show.1
diff --git a/man/mysqlslap.1 b/man/mariadb-slap.1
index f14ab8fa3fa..f14ab8fa3fa 100644
--- a/man/mysqlslap.1
+++ b/man/mariadb-slap.1
diff --git a/man/mariadb-test-embedded.1 b/man/mariadb-test-embedded.1
new file mode 100644
index 00000000000..65bf13872da
--- /dev/null
+++ b/man/mariadb-test-embedded.1
@@ -0,0 +1 @@
+.so man1/mariadb-test.1
diff --git a/man/mysqltest.1 b/man/mariadb-test.1
index 0f1e138a448..0f1e138a448 100644
--- a/man/mysqltest.1
+++ b/man/mariadb-test.1
diff --git a/man/mysql_tzinfo_to_sql.1 b/man/mariadb-tzinfo-to-sql.1
index 08681335c47..08681335c47 100644
--- a/man/mysql_tzinfo_to_sql.1
+++ b/man/mariadb-tzinfo-to-sql.1
diff --git a/man/mysql_upgrade.1 b/man/mariadb-upgrade.1
index c4041f281a5..c4041f281a5 100644
--- a/man/mysql_upgrade.1
+++ b/man/mariadb-upgrade.1
diff --git a/man/mysql_waitpid.1 b/man/mariadb-waitpid.1
index 9153d246a03..9153d246a03 100644
--- a/man/mysql_waitpid.1
+++ b/man/mariadb-waitpid.1
diff --git a/man/mysql.1 b/man/mariadb.1
index 38ab116a2ef..dd62e92e879 100644
--- a/man/mysql.1
+++ b/man/mariadb.1
@@ -1630,28 +1630,29 @@ SELECT
statements when using
\fB\-\-safe\-updates\fR\&. (Default value is 1,000\&.)
.RE
-.\" MYSQL_HISTFILE environment variable
-.\" environment variable: MYSQL_HISTFILE
+.\" MARIADB_HISTFILE environment variable
+.\" environment variable: MARIADB_HISTFILE
.\" HOME environment variable
.\" environment variable: HOME
.\" mysql history file
.\" command-line history: mysql
-.\" .mysql_history file
+.\" .mariadb_history file
.PP
On Unix, the
\fBmysql\fR
client writes a record of executed statements to a history file\&. By default, this file is named
-\&.mysql_history
-and is created in your home directory\&. To specify a different file, set the value of the
-MYSQL_HISTFILE
-environment variable\&.
+\&.mariadb_history
+and is created in your home directory\&. For backwards compatibility \&.mysql_history will be used if present and
+\&.mariadb_history is missing\&. To specify a different file, set the value of the
+MARIADB_HISTFILE
+environment variable\&. The environment variable MYSQL_HISTFILE will be used if MARIADB_HISTFILE isn't present\&.
.PP
The
-\&.mysql_history
+\&.mariadb_history
should be protected with a restrictive access mode because sensitive information might be written to it, such as the text of SQL statements that contain passwords\&.
.PP
If you do not want to maintain a history file, first remove
-\&.mysql_history
+\&.mariadb_history
if it exists, and then use either of the following techniques:
.sp
.RS 4
@@ -1663,7 +1664,7 @@ if it exists, and then use either of the following techniques:
.IP \(bu 2.3
.\}
Set the
-MYSQL_HISTFILE
+MARIADB_HISTFILE
variable to
/dev/null\&. To cause this setting to take effect each time you log in, put the setting in one of your shell\'s startup files\&.
.RE
@@ -1677,7 +1678,7 @@ variable to
.IP \(bu 2.3
.\}
Create
-\&.mysql_history
+\&.mariadb_history
as a symbolic link to
/dev/null:
.sp
@@ -1685,7 +1686,7 @@ as a symbolic link to
.RS 4
.\}
.nf
-shell> \fBln \-s /dev/null $HOME/\&.mysql_history\fR
+shell> \fBln \-s /dev/null $HOME/\&.mariadb_history\fR
.fi
.if n \{\
.RE
diff --git a/man/mysql_config.1 b/man/mariadb_config.1
index 334478ce32c..334478ce32c 100644
--- a/man/mysql_config.1
+++ b/man/mariadb_config.1
diff --git a/man/mysqld_multi.1 b/man/mariadbd-multi.1
index e195eb59e2e..e195eb59e2e 100644
--- a/man/mysqld_multi.1
+++ b/man/mariadbd-multi.1
diff --git a/man/mysqld_safe_helper.1 b/man/mariadbd-safe-helper.1
index e25447e2c11..e25447e2c11 100644
--- a/man/mysqld_safe_helper.1
+++ b/man/mariadbd-safe-helper.1
diff --git a/man/mysqld_safe.1 b/man/mariadbd-safe.1
index 2e58df90c69..2e58df90c69 100644
--- a/man/mysqld_safe.1
+++ b/man/mariadbd-safe.1
diff --git a/man/mysqld.8 b/man/mariadbd.8
index aee253fc220..aee253fc220 100644
--- a/man/mysqld.8
+++ b/man/mariadbd.8
diff --git a/man/mysql_client_test_embedded.1 b/man/mysql_client_test_embedded.1
deleted file mode 100644
index 41f528017b3..00000000000
--- a/man/mysql_client_test_embedded.1
+++ /dev/null
@@ -1 +0,0 @@
-.so man1/mysql_client_test.1
diff --git a/man/mysql_embedded.1 b/man/mysql_embedded.1
deleted file mode 100644
index 735c4e05ae0..00000000000
--- a/man/mysql_embedded.1
+++ /dev/null
@@ -1 +0,0 @@
-.so man1/mysql.1
diff --git a/man/mysqltest_embedded.1 b/man/mysqltest_embedded.1
deleted file mode 100644
index a00489327b3..00000000000
--- a/man/mysqltest_embedded.1
+++ /dev/null
@@ -1 +0,0 @@
-.so man1/mysqltest.1
diff --git a/mysql-test/include/analyze-format.inc b/mysql-test/include/analyze-format.inc
index 7d1c48f3e6f..e65450ff001 100644
--- a/mysql-test/include/analyze-format.inc
+++ b/mysql-test/include/analyze-format.inc
@@ -1,3 +1,3 @@
# The time on ANALYSE FORMAT=JSON is rather variable
---replace_regex /("(r_total_time_ms|r_table_time_ms|r_other_time_ms|r_buffer_size|r_filling_time_ms|r_query_time_in_progress_ms)": )[^, \n]*/\1"REPLACED"/
+--replace_regex /("(r_total_time_ms|r_table_time_ms|r_other_time_ms|r_buffer_size|r_filling_time_ms|r_query_time_in_progress_ms|r_unpack_time_ms|cost)": )[^, \n]*/\1"REPLACED"/
diff --git a/mysql-test/include/analyze-no-filtered.inc b/mysql-test/include/analyze-no-filtered.inc
new file mode 100644
index 00000000000..eb1663167b2
--- /dev/null
+++ b/mysql-test/include/analyze-no-filtered.inc
@@ -0,0 +1,2 @@
+--replace_regex /("(filtered|r_total_time_ms|r_table_time_ms|r_other_time_ms|r_buffer_size|r_filling_time_ms|r_query_time_in_progress_ms|r_unpack_time_ms|cost)": )[^, \n]*/\1"REPLACED"/
+
diff --git a/mysql-test/include/check-testcase.test b/mysql-test/include/check-testcase.test
index 078f6572bed..1038ff30c11 100644
--- a/mysql-test/include/check-testcase.test
+++ b/mysql-test/include/check-testcase.test
@@ -97,7 +97,7 @@ select count(*) from mysql.proc;
call mtr.check_testcase();
let $datadir=`select @@datadir`;
-list_files $datadir mysql_upgrade_info;
+list_files $datadir mariadb_upgrade_info;
list_files $datadir/test #sql*;
list_files $datadir/mysql #sql*;
diff --git a/mysql-test/include/common-tests.inc b/mysql-test/include/common-tests.inc
index 9c6b29858c8..9b54b049f8b 100644
--- a/mysql-test/include/common-tests.inc
+++ b/mysql-test/include/common-tests.inc
@@ -13,6 +13,11 @@
drop table if exists t1,t2,t3,t4;
--enable_warnings
+# We have to use Aria instead of MyISAM as MyISAM has a very high row
+# access cost which causes some tests to use join_cache instead of eq_ref
+
+set @@default_storage_engine="aria";
+
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
@@ -1429,7 +1434,7 @@ set tmp_memory_table_size=default;
select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 limit 100,10;
#
-# A big order by that should trigger a merge in filesort
+# A big order by that should trigger a merge in filesort
#
select distinct companynr,rtrim(space(512+companynr)) from t3 order by 1,2;
@@ -1446,9 +1451,9 @@ select distinct fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2nr orde
explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2nr order by t3.t2nr,fld3;
-#
-# Some test with ORDER BY and limit
-#
+--echo #
+--echo # Some test with ORDER BY and limit
+--echo #
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
@@ -1501,7 +1506,7 @@ create table t4 (
companyname char(30) NOT NULL default '',
PRIMARY KEY (companynr),
UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
+) ENGINE=aria MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
--disable_query_log
INSERT INTO t4 (companynr, companyname) VALUES (29,'company 1');
@@ -1555,8 +1560,9 @@ explain select companynr,companyname from t2 left join t4 using (companynr) wher
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr is null;
delete from t2 where fld1=999999;
-#
-# Test left join optimization
+--echo #
+--echo # Test left join optimization
+--echo #
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
diff --git a/mysql-test/include/ctype_numconv.inc b/mysql-test/include/ctype_numconv.inc
index 6c7ac3b69fe..00364fd3406 100644
--- a/mysql-test/include/ctype_numconv.inc
+++ b/mysql-test/include/ctype_numconv.inc
@@ -1739,6 +1739,7 @@ CREATE TABLE t1 (
date_column DATE DEFAULT NULL,
KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
+INSERT INTO t1 VALUES (3,'2012-09-01'),(4,'2012-10-01'),(5,'2012-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
ALTER TABLE t1 MODIFY date_column DATETIME DEFAULT NULL;
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
diff --git a/mysql-test/include/explain-no-costs-filtered.inc b/mysql-test/include/explain-no-costs-filtered.inc
new file mode 100644
index 00000000000..585d8b3fdcc
--- /dev/null
+++ b/mysql-test/include/explain-no-costs-filtered.inc
@@ -0,0 +1 @@
+--replace_regex /("(cost|filtered)": )[^, \n]*/\1"REPLACED"/
diff --git a/mysql-test/include/explain-no-costs.inc b/mysql-test/include/explain-no-costs.inc
new file mode 100644
index 00000000000..f2f362b8cbe
--- /dev/null
+++ b/mysql-test/include/explain-no-costs.inc
@@ -0,0 +1 @@
+--replace_regex /("(cost)": )[^, \n]*/\1"COST_REPLACED"/
diff --git a/mysql-test/include/explain_non_select.inc b/mysql-test/include/explain_non_select.inc
index d22310c9813..8e60f582f9e 100644
--- a/mysql-test/include/explain_non_select.inc
+++ b/mysql-test/include/explain_non_select.inc
@@ -1,6 +1,7 @@
# This file is a collection of regression and coverage tests
# for WL#4897: Add EXPLAIN INSERT/UPDATE/DELETE.
+-- source include/have_sequence.inc
-- disable_query_log
-- disable_result_log
# SET GLOBAL innodb_stats_persistent=0;
@@ -73,15 +74,18 @@ INSERT INTO t2 VALUES (1), (2), (3);
--source include/explain_utils.inc
DROP TABLE t1, t2;
---echo #7
+--echo #7a
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3);
CREATE TABLE t2 (b INT);
-INSERT INTO t2 VALUES (1), (2), (3);
---let $query = UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3)
---let $select = SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3)
+INSERT INTO t2 VALUES (1), (2), (3), (1000);
+CREATE TABLE t3 like t2;
+insert into t3 select * from t2;
+insert into t3 select seq from seq_1001_to_2000;
+--let $query = UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3)
+--let $select = SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3)
--source include/explain_utils.inc
-DROP TABLE t1, t2;
+DROP TABLE t1, t2, t3;
--echo #8
CREATE TABLE t1 (a INT);
@@ -197,7 +201,7 @@ DROP TABLE t1, t2, t3;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3);
CREATE TABLE t2 (a INT);
-INSERT INTO t2 VALUES (1), (2), (3);
+INSERT INTO t2 VALUES (1), (2), (3), (1000);
--let $query = UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2)
--let $select = SELECT * FROM t1 WHERE a IN (SELECT a FROM t2)
--source include/explain_utils.inc
diff --git a/mysql-test/include/galera_variables_ok.inc b/mysql-test/include/galera_variables_ok.inc
index c9a54724c17..e420b3af6c3 100644
--- a/mysql-test/include/galera_variables_ok.inc
+++ b/mysql-test/include/galera_variables_ok.inc
@@ -1,6 +1,6 @@
--disable_query_log
---let $galera_variables_ok = `SELECT COUNT(*) = 50 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep%'`
+--let $galera_variables_ok = `SELECT COUNT(*) = 51 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep%'`
--if (!$galera_variables_ok) {
--skip Galera number of variables has changed!
diff --git a/mysql-test/include/icp_tests.inc b/mysql-test/include/icp_tests.inc
index d78fe0dd209..b37f59b46c5 100644
--- a/mysql-test/include/icp_tests.inc
+++ b/mysql-test/include/icp_tests.inc
@@ -486,7 +486,7 @@ CREATE TABLE t1 (
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
-
+insert into t1 select seq,seq from seq_100_to_110;
EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
SET SESSION optimizer_switch='index_condition_pushdown=off';
@@ -723,7 +723,6 @@ DROP TABLE t1;
CREATE TABLE t1 (b int NOT NULL, c int, a varchar(1024), PRIMARY KEY (b));
INSERT INTO t1 VALUES (1,4,'Ill');
-insert into t1 select seq+100,5,seq from seq_1_to_100;
CREATE TABLE t2 (a varchar(1024), KEY (a(512)));
INSERT INTO t2 VALUES
@@ -856,6 +855,8 @@ ANALYZE TABLE t1,t2;
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
+set @save_optimizer_where_cost=@@optimizer_where_cost;
+set @@optimizer_where_cost=1;
EXPLAIN
SELECT COUNT(*) FROM t1 AS t, t2
@@ -873,6 +874,7 @@ WHERE c = g
OR a = 0 AND h < 'z' );
SET optimizer_switch=@save_optimizer_switch;
+set @@optimizer_where_cost=@save_optimizer_where_cost;
DROP TABLE t1,t2;
diff --git a/mysql-test/include/index_merge1.inc b/mysql-test/include/index_merge1.inc
index 91609f628ca..199fc9d3b2f 100644
--- a/mysql-test/include/index_merge1.inc
+++ b/mysql-test/include/index_merge1.inc
@@ -517,7 +517,7 @@ DROP TABLE t1;
create table t1 (a int);
insert into t1 values (1),(2);
create table t2(a int, b int);
-insert into t2 values (1,1), (2, 1000);
+insert into t2 values (1,1), (2, 1000),(5000,5000);
create table t3 (a int, b int, filler char(100), key(a), key(b));
insert into t3 select 1000, 1000,'filler' from seq_1_to_1000;
diff --git a/mysql-test/include/last_query_cost.inc b/mysql-test/include/last_query_cost.inc
new file mode 100644
index 00000000000..a18fd9e4c04
--- /dev/null
+++ b/mysql-test/include/last_query_cost.inc
@@ -0,0 +1,5 @@
+--disable_query_log
+--disable_column_names
+show status like 'last_query_cost';
+--enable_column_names
+--enable_query_log
diff --git a/mysql-test/include/load_dump_and_upgrade.inc b/mysql-test/include/load_dump_and_upgrade.inc
index 52351ea473c..86ebc12c506 100644
--- a/mysql-test/include/load_dump_and_upgrade.inc
+++ b/mysql-test/include/load_dump_and_upgrade.inc
@@ -46,4 +46,4 @@ SELECT COUNT(*) > 0 AS `mysql.user has data` FROM mysql.user;
# It will fail if the file doesn't exist, which is good,
# which is an extra check that it was written
---remove_file $ddir/mysql_upgrade_info
+--remove_file $ddir/mariadb_upgrade_info
diff --git a/mysql-test/include/mix1.inc b/mysql-test/include/mix1.inc
index 2ec0868c39e..cbb79668b2a 100644
--- a/mysql-test/include/mix1.inc
+++ b/mysql-test/include/mix1.inc
@@ -1183,14 +1183,14 @@ set @my_innodb_autoextend_increment=@@global.innodb_autoextend_increment;
set global innodb_autoextend_increment=8;
set global innodb_autoextend_increment=@my_innodb_autoextend_increment;
-#
-# Bug #37830: ORDER BY ASC/DESC - no difference
-#
+--echo #
+--echo # Bug #37830: ORDER BY ASC/DESC - no difference
+--echo #
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY (a), KEY t1_b (b))
ENGINE=InnoDB;
-INSERT INTO t1 (a,b,c) VALUES (1,1,1), (2,1,1), (3,1,1), (4,1,1);
+INSERT INTO t1 (a,b,c) VALUES (1,1,1), (2,1,1), (3,1,1), (4,1,1), (100,2,2);
INSERT INTO t1 (a,b,c) SELECT a+4,b,c FROM t1;
-- disable_query_log
diff --git a/mysql-test/include/percona_nonflushing_analyze_debug.inc b/mysql-test/include/percona_nonflushing_analyze_debug.inc
index 95621c70d5c..8cdf6218609 100644
--- a/mysql-test/include/percona_nonflushing_analyze_debug.inc
+++ b/mysql-test/include/percona_nonflushing_analyze_debug.inc
@@ -8,7 +8,7 @@
--connect con1,localhost,root
-SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SET DEBUG_SYNC="handler_rnd_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
send_eval SELECT * FROM $percona_nonflushing_analyze_table;
diff --git a/mysql-test/include/rowid_filter_debug_kill.inc b/mysql-test/include/rowid_filter_debug_kill.inc
index 513efed8a4c..230bef6d10c 100644
--- a/mysql-test/include/rowid_filter_debug_kill.inc
+++ b/mysql-test/include/rowid_filter_debug_kill.inc
@@ -2,13 +2,15 @@
--source include/have_debug_sync.inc
--source include/have_sequence.inc
--source include/count_sessions.inc
+--source include/have_sequence.inc
+--source include/no_valgrind_without_big.inc
--echo #
--echo # MDEV-22761 KILL QUERY during rowid_filter, crashes
--echo #
create table t2(a int);
-insert into t2 select * from seq_0_to_99;
+insert into t2 select seq from seq_1_to_100;
# 10K rows
CREATE TABLE t3 (
@@ -18,30 +20,24 @@ CREATE TABLE t3 (
KEY (key1),
KEY (key2)
);
+insert into t3 select seq,seq, 'filler-data-filler-data' from seq_1_to_2000;
+
select engine from information_schema.tables
where table_schema=database() and table_name='t3';
-
-insert into t3
-select
- A.seq,
- B.seq,
- 'filler-data-filler-data'
-from seq_0_to_99 A, seq_0_to_99 B;
-
analyze table t2,t3;
explain
-select * from t2, t3
+select straight_join * from t2, t3
where
- t3.key1=t2.a and t3.key2 in (2,3);
+ t3.key1=t2.a and t3.key2 between 2 and 10;
let $target_id= `select connection_id()`;
set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go';
send
-select * from t2, t3
+select straight_join * from t2, t3
where
- t3.key1=t2.a and t3.key2 in (2,3);
+ t3.key1=t2.a and t3.key2 between 2 and 10;
connect (con1, localhost, root,,);
set debug_sync='now WAIT_FOR at_rowid_filter_check';
diff --git a/mysql-test/include/world.inc b/mysql-test/include/world.inc
index 1451a4ab3b5..91ea66ef731 100644
--- a/mysql-test/include/world.inc
+++ b/mysql-test/include/world.inc
@@ -4,6 +4,7 @@
# Table Country
+BEGIN;
INSERT IGNORE INTO Country VALUES
('AFG','Afghanistan',652090.00,22720000,1),
('NLD','Netherlands',41526.00,15864000,5),
@@ -5339,5 +5340,6 @@ INSERT INTO CountryLanguage VALUES
('CHN','Dong',0.2),
('RUS','Belorussian',0.3),
('USA','Portuguese',0.2);
+COMMIT;
ANALYZE TABLE Country, City, CountryLanguage;
diff --git a/mysql-test/lib/My/Debugger.pm b/mysql-test/lib/My/Debugger.pm
index c2062c2eaba..412c028cfc5 100644
--- a/mysql-test/lib/My/Debugger.pm
+++ b/mysql-test/lib/My/Debugger.pm
@@ -78,7 +78,7 @@ my %debuggers = (
options => '-f -o {log} {exe} {args}',
},
rr => {
- options => '_RR_TRACE_DIR={log} rr record {exe} {args} --loose-skip-innodb-use-native-aio --loose-innodb-flush-method=fsync',
+ options => '_RR_TRACE_DIR={log} rr record {exe} {args}',
run => 'env',
pre => sub {
::mtr_error('rr requires kernel.perf_event_paranoid <= 1')
diff --git a/mysql-test/main/alter_table_combinations,aria.rdiff b/mysql-test/main/alter_table_combinations,aria.rdiff
index 9ea38135908..e030571679f 100644
--- a/mysql-test/main/alter_table_combinations,aria.rdiff
+++ b/mysql-test/main/alter_table_combinations,aria.rdiff
@@ -1,5 +1,5 @@
---- main/alter_table_combinations.result 2022-05-24 17:16:56.769146869 +0200
-+++ main/alter_table_combinations.reject 2022-05-24 17:25:20.847126357 +0200
+--- main/alter_table_combinations.result
++++ main/alter_table_combinations.reject
@@ -173,8 +173,7 @@
t3 CREATE TABLE `t3` (
`a` int(11) DEFAULT NULL,
diff --git a/mysql-test/main/alter_table_combinations,heap.rdiff b/mysql-test/main/alter_table_combinations,heap.rdiff
index 0ca6d3de88d..493ce0ea884 100644
--- a/mysql-test/main/alter_table_combinations,heap.rdiff
+++ b/mysql-test/main/alter_table_combinations,heap.rdiff
@@ -1,5 +1,5 @@
---- main/alter_table_combinations.result 2022-05-24 17:16:56.769146869 +0200
-+++ main/alter_table_combinations.reject 2022-05-24 17:25:01.216127156 +0200
+--- main/alter_table_combinations.result
++++ main/alter_table_combinations.reject
@@ -11,7 +11,7 @@
alter table t1 change x xx int, algorithm=inplace;
check table t1;
diff --git a/mysql-test/main/analyze_format_json.result b/mysql-test/main/analyze_format_json.result
index 9a756782f96..02635a8f3dd 100644
--- a/mysql-test/main/analyze_format_json.result
+++ b/mysql-test/main/analyze_format_json.result
@@ -10,6 +10,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -17,9 +18,11 @@ ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -46,6 +49,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -53,9 +57,11 @@ ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -72,9 +78,11 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"r_loops": 0,
"rows": 1,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null
}
@@ -96,6 +104,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -103,9 +112,11 @@ ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -122,9 +133,11 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"r_loops": 10,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -149,6 +162,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -156,9 +170,11 @@ ANALYZE
"table": {
"table_name": "tbl1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 100,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -171,9 +187,11 @@ ANALYZE
"table": {
"table_name": "tbl2",
"access_type": "ALL",
+ "loops": 100,
"r_loops": 1,
"rows": 100,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -183,7 +201,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "1Kb",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -198,6 +217,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -205,9 +225,11 @@ ANALYZE
"table": {
"table_name": "tbl1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 100,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -220,9 +242,11 @@ ANALYZE
"table": {
"table_name": "tbl2",
"access_type": "ALL",
+ "loops": 100,
"r_loops": 1,
"rows": 100,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -233,7 +257,8 @@ ANALYZE
"buffer_size": "1Kb",
"join_type": "BNL",
"attached_condition": "tbl1.c > tbl2.c",
- "r_filtered": 15.83333333
+ "r_filtered": 15.83333333,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -256,6 +281,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -263,9 +289,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -282,9 +310,11 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
+ "loops": 10,
"r_loops": 10,
- "rows": 2,
+ "rows": 1,
"r_rows": 0.2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -317,6 +347,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -324,9 +355,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -381,6 +414,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -392,9 +426,11 @@ ANALYZE
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["pk"],
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -461,6 +497,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -468,9 +505,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 5,
"r_rows": 5,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -494,9 +533,11 @@ ANALYZE
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["key1", "key2", "key3", "key4"],
+ "loops": 5,
"r_loops": 5,
"rows": 1010,
"r_rows": 203.8,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -531,6 +572,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -538,9 +580,11 @@ ANALYZE
"table": {
"table_name": "tbl1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -555,6 +599,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -562,9 +607,11 @@ ANALYZE
"table": {
"table_name": "tbl2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -602,6 +649,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"having_condition": "TOP > t2.a",
@@ -619,9 +667,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 256,
"r_rows": 256,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -642,6 +692,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -658,9 +709,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 256,
"r_rows": 256,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -695,6 +748,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -711,9 +765,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 256,
"r_rows": 256,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -741,6 +797,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -748,9 +805,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -762,9 +821,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -775,7 +836,8 @@ ANALYZE
"buffer_size": "65",
"join_type": "BNL",
"attached_condition": "<in_optimizer>(t2.b,t2.b in (subquery#2))",
- "r_filtered": null
+ "r_filtered": null,
+ "r_unpack_time_ms": "REPLACED"
}
}
],
@@ -783,6 +845,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -790,9 +853,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -805,6 +870,8 @@ ANALYZE
]
}
}
+SELECT STRAIGHT_JOIN * FROM t1, t2 WHERE b IN ( SELECT a FROM t1 );
+a b
drop table t1,t2;
#
# MDEV-8864: Server crash #2 in Item_field::print on ANALYZE FORMAT=JSON
@@ -827,6 +894,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -852,9 +920,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -866,9 +936,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -878,7 +950,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "1",
"join_type": "BNL",
- "r_filtered": null
+ "r_filtered": null,
+ "r_unpack_time_ms": "REPLACED"
}
}
],
@@ -889,14 +962,17 @@ ANALYZE
"r_loops": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 2,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null
}
@@ -906,9 +982,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 0,
"rows": 2,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null
},
@@ -916,7 +994,8 @@ ANALYZE
"buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t2.f2 = t3.f3",
- "r_filtered": null
+ "r_filtered": null,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -947,6 +1026,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -954,9 +1034,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -972,6 +1054,7 @@ ANALYZE
"r_loops": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -986,9 +1069,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "t2.a < t1.a"
diff --git a/mysql-test/main/analyze_format_json.test b/mysql-test/main/analyze_format_json.test
index 3f3324e9eec..84625f125fb 100644
--- a/mysql-test/main/analyze_format_json.test
+++ b/mysql-test/main/analyze_format_json.test
@@ -154,6 +154,7 @@ drop table t0, t1;
--echo #
--echo # MDEV-7970: EXPLAIN FORMAT=JSON does not print HAVING
+--source include/explain-no-costs.inc
--echo #
create table t0(a int);
insert into t0 values (0),(1),(2),(3);
@@ -190,6 +191,7 @@ INSERT INTO t2 VALUES (3),(4);
--source include/analyze-format.inc
ANALYZE FORMAT=JSON SELECT STRAIGHT_JOIN * FROM t1, t2 WHERE b IN ( SELECT a FROM t1 );
+SELECT STRAIGHT_JOIN * FROM t1, t2 WHERE b IN ( SELECT a FROM t1 );
drop table t1,t2;
diff --git a/mysql-test/main/analyze_stmt.result b/mysql-test/main/analyze_stmt.result
index c5d35759c9c..8ba089d8d31 100644
--- a/mysql-test/main/analyze_stmt.result
+++ b/mysql-test/main/analyze_stmt.result
@@ -258,7 +258,7 @@ drop table t1;
create table t1 (i int);
analyze delete from t1 returning *;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 0 0.00 100.00 100.00
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
drop table t1;
#
# MDEV-6396: ANALYZE INSERT/REPLACE is accepted, but does not produce a plan
@@ -314,7 +314,7 @@ insert into t2 values (0),(1);
analyze select * from t1 straight_join t2 force index(a) where t2.a=t1.a;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 100.00 100.00 Using where
-1 SIMPLE t2 ref a a 5 test.t1.a 2 0.20 100.00 100.00 Using index
+1 SIMPLE t2 ref a a 5 test.t1.a 1 0.20 100.00 100.00 Using index
drop table t1,t2;
#
# MDEV-8063: Unconditional ANALYZE DELETE does not delete rows
diff --git a/mysql-test/main/analyze_stmt_orderby.result b/mysql-test/main/analyze_stmt_orderby.result
index 76bc4d964b8..c3a3f2c562e 100644
--- a/mysql-test/main/analyze_stmt_orderby.result
+++ b/mysql-test/main/analyze_stmt_orderby.result
@@ -182,6 +182,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t2.b",
"temporary_table": {
@@ -190,7 +191,9 @@ EXPLAIN
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a is not null"
}
@@ -204,7 +207,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -222,6 +227,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -238,9 +244,11 @@ ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -257,9 +265,11 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"r_loops": 10,
"rows": 1,
"r_rows": 0.4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -285,6 +295,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -293,7 +304,9 @@ EXPLAIN
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a is not null"
}
@@ -309,7 +322,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -325,6 +340,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -342,9 +358,11 @@ ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -363,9 +381,11 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"r_loops": 10,
"rows": 1,
"r_rows": 0.4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -396,6 +416,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -412,9 +433,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 1000,
"r_rows": 1000,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -459,6 +482,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"duplicate_removal": {
@@ -468,9 +492,11 @@ ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -482,9 +508,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 10,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -494,7 +522,8 @@ ANALYZE
"buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t3.a = t0.a",
- "r_filtered": 10
+ "r_filtered": 10,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -526,6 +555,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -551,9 +581,11 @@ ANALYZE
"table": {
"table_name": "t6",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 5,
"r_rows": 5,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -566,9 +598,11 @@ ANALYZE
"table": {
"table_name": "t5",
"access_type": "ALL",
+ "loops": 5,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -578,7 +612,8 @@ ANALYZE
"buffer_size": "119",
"join_type": "BNL",
"attached_condition": "t5.a = t6.a",
- "r_filtered": 21.42857143
+ "r_filtered": 21.42857143,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -596,6 +631,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "count(distinct t5.b)",
"temporary_table": {
@@ -607,7 +643,9 @@ EXPLAIN
"table": {
"table_name": "t6",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t6.b > 0 and t6.a <= 5"
}
@@ -617,7 +655,9 @@ EXPLAIN
"table": {
"table_name": "t5",
"access_type": "ALL",
+ "loops": 5,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -664,6 +704,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -674,9 +715,11 @@ ANALYZE
"key": "idx",
"key_length": "5",
"used_key_parts": ["col1"],
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 20,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
diff --git a/mysql-test/main/analyze_stmt_orderby.test b/mysql-test/main/analyze_stmt_orderby.test
index ecee8040ed5..519cae45a97 100644
--- a/mysql-test/main/analyze_stmt_orderby.test
+++ b/mysql-test/main/analyze_stmt_orderby.test
@@ -21,6 +21,7 @@ insert into t2 select A.a*1000 + B.a, A.a*1000 + B.a from t0 A, t1 B;
--echo #
explain
update t2 set b=b+1 order by b limit 5;
+--source include/explain-no-costs.inc
explain format=json
update t2 set b=b+1 order by b limit 5;
--source include/analyze-format.inc
@@ -32,6 +33,7 @@ update t2 set b=b+1 order by b limit 5;
--echo #
explain
update t2 set a=a+1 where a<10;
+--source include/explain-no-costs.inc
explain format=json
update t2 set a=a+1 where a<10;
--source include/analyze-format.inc
@@ -43,6 +45,7 @@ update t2 set a=a+1 where a<10;
--echo #
explain
delete from t2 order by b limit 5;
+--source include/explain-no-costs.inc
explain format=json
delete from t2 order by b limit 5;
--source include/analyze-format.inc
@@ -54,6 +57,7 @@ delete from t2 order by b limit 5;
--echo #
explain
select * from t0,t2 where t2.a=t0.a order by t2.b limit 4;
+--source include/explain-no-costs.inc
explain format=json
select * from t0,t2 where t2.a=t0.a order by t2.b limit 4;
--source include/analyze-format.inc
@@ -66,6 +70,7 @@ select * from t0,t2 where t2.a=t0.a order by t2.b limit 4;
--echo #
explain
select * from t0,t2 where t2.a=t0.a order by t0.a limit 4;
+--source include/explain-no-costs.inc
explain format=json
select * from t0,t2 where t2.a=t0.a order by t0.a limit 4;
--source include/analyze-format.inc
@@ -143,6 +148,7 @@ select count(distinct t5.b) as sum from t5, t6
where t5.a=t6.a and t6.b > 0 and t5.a <= 5
group by t5.a order by sum limit 1;
+--source include/explain-no-costs.inc
explain format=json
select count(distinct t5.b) as sum from t5, t6
where t5.a=t6.a and t6.b > 0 and t5.a <= 5
diff --git a/mysql-test/main/analyze_stmt_privileges2.result b/mysql-test/main/analyze_stmt_privileges2.result
index 2b75f736a22..3f09b1722b6 100644
--- a/mysql-test/main/analyze_stmt_privileges2.result
+++ b/mysql-test/main/analyze_stmt_privileges2.result
@@ -377,13 +377,11 @@ a b
EXPLAIN SELECT * FROM t1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM t1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 4.00 100.00 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 0.00 100.00 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 3.00 100.00 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 3.00 33.33 0.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#------------------------------------------------------------------------
# I/R/U/D/S on the inner view
# Expectation: Can run everything
@@ -492,13 +490,11 @@ a b
EXPLAIN SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 8.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#------------------------------------------------------------------------
# I/R/U/D/S on the outer view
# Expectation: Can run everything
@@ -599,13 +595,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 12 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 12 12.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#========================================================================
# Test: Grant INSERT on the table
@@ -1445,10 +1439,10 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f
DELETE FROM t1 WHERE a = 10;
EXPLAIN DELETE FROM t1 WHERE a = 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
ANALYZE DELETE FROM t1 WHERE a = 10;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1 0.00 100.00 100.00 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DELETE FROM t1 USING t1, t2;
EXPLAIN DELETE FROM t1 USING t1, t2;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1592,13 +1586,11 @@ a b
EXPLAIN SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 4.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#------------------------------------------------------------------------
# I/R/U/D/S on the outer view
# Expectation: Can run everything: SELECT access to the column `a`
@@ -1709,13 +1701,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 8.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#========================================================================
# Test: Grant SELECT, INSERT, UPDATE, DELETE on the table
@@ -1790,10 +1780,10 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f
DELETE FROM t1 WHERE a = 10;
EXPLAIN DELETE FROM t1 WHERE a = 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
ANALYZE DELETE FROM t1 WHERE a = 10;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1 0.00 100.00 100.00 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DELETE FROM t1 USING t1, t2;
EXPLAIN DELETE FROM t1 USING t1, t2;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1941,13 +1931,11 @@ a b
EXPLAIN SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 4.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#------------------------------------------------------------------------
# I/R/U/D/S on the outer view
# Expectation: Can run everything
@@ -2049,13 +2037,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 8.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#########################################################################
# Inner view permission tests
@@ -2698,13 +2684,11 @@ a b
EXPLAIN SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 14 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 14 14.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#------------------------------------------------------------------------
# I/R/U/D/S on the outer view
# Expectation: Can run everything
@@ -2805,13 +2789,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 18 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 18 18.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#========================================================================
# Test: Grant INSERT on the inner view
@@ -3988,13 +3970,11 @@ a b
EXPLAIN SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 35 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v1 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 35 35.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#------------------------------------------------------------------------
# I/R/U/D/S on the outer view
# Expectation: Can run everything
@@ -4095,13 +4075,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 39 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 39 39.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#########################################################################
# Outer view permission tests
@@ -4615,13 +4593,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 39 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 39 39.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
#========================================================================
# Test: Grant INSERT on the outer view
@@ -5222,13 +5198,11 @@ a b
EXPLAIN SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 44 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
ANALYZE SELECT * FROM v2 WHERE a IN ( SELECT a FROM t2 );
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 44 44.00 100.00 0.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 NULL 100.00 NULL
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 NULL 100.00 NULL
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 NULL 33.33 NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
disconnect con1;
connection default;
DROP USER 'privtest'@localhost;
diff --git a/mysql-test/main/brackets.result b/mysql-test/main/brackets.result
index f87afdc0f47..0403ce81d1d 100644
--- a/mysql-test/main/brackets.result
+++ b/mysql-test/main/brackets.result
@@ -261,6 +261,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -269,7 +270,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -291,6 +294,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -299,7 +303,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -329,12 +335,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 20"
}
@@ -346,12 +355,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -383,12 +395,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 20"
}
@@ -400,12 +415,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -445,6 +463,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -453,7 +472,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -466,12 +487,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.pk > 4"
}
diff --git a/mysql-test/main/brackets.test b/mysql-test/main/brackets.test
index 9a0c204e271..4300e8a0592 100644
--- a/mysql-test/main/brackets.test
+++ b/mysql-test/main/brackets.test
@@ -117,24 +117,28 @@ let $q1=
select a from t1 order by a desc limit 1;
eval $q1;
eval explain extended $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
let $q2=
(select a from t1 order by a desc) limit 1;
eval $q2;
eval explain extended $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
let $q1=
(select a from t1 where a=20 union select a from t1) order by a desc limit 1;
eval $q1;
eval explain extended $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
let $q2=
((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
eval $q2;
eval explain extended $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
drop table t1;
@@ -150,6 +154,7 @@ let $q=
((select * from t1 order by pk) limit 2) union (select * from t1 where pk > 4);
eval $q;
eval explain extended $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
drop table t1;
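A note on the --source include/explain-no-costs.inc lines added in this test: they keep EXPLAIN FORMAT=JSON output stable now that cost numbers appear in the plan. The include file itself is not part of this diff, so the following is only a minimal mysqltest sketch of what such a helper could contain; the exact regex and file contents are assumptions, not the shipped include:

# Hypothetical explain-no-costs helper (sketch, not the real include/explain-no-costs.inc).
# Mask the numeric "cost" values in the next statement's JSON output so that
# .result files do not change every time the cost constants are tuned.
# --replace_regex only affects the output of the statement that follows it.
--replace_regex /"cost": [0-9e.+-]+/"cost": "COST_REPLACED"/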
diff --git a/mysql-test/main/bug12427262.result b/mysql-test/main/bug12427262.result
index 6e79ec3aa11..8ec14efc45e 100644
--- a/mysql-test/main/bug12427262.result
+++ b/mysql-test/main/bug12427262.result
@@ -16,8 +16,6 @@ create table t10 (c1 int);
select Sum(ALL(COUNT_READ)) from performance_schema.file_summary_by_instance where FILE_NAME
like "%show_table_lw_db%" AND FILE_NAME like "%.frm%" AND EVENT_NAME='wait/io/file/sql/FRM'
into @count_read_before;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show tables;
Tables_in_show_table_lw_db
t1
@@ -33,8 +31,6 @@ t9
select Sum(ALL(COUNT_READ)) from performance_schema.file_summary_by_instance where FILE_NAME
like "%show_table_lw_db%" AND FILE_NAME like "%.frm%" AND EVENT_NAME='wait/io/file/sql/FRM'
into @count_read_after;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @count_read_after-@count_read_before;
@count_read_after-@count_read_before
0.00000000000000000000000000000000000000
@@ -53,8 +49,6 @@ t9 BASE TABLE
select Sum(ALL(COUNT_READ)) from performance_schema.file_summary_by_instance where FILE_NAME
like "%show_table_lw_db%" AND FILE_NAME like "%.frm%" AND EVENT_NAME='wait/io/file/sql/FRM'
into @count_read_after;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @count_read_after-@count_read_before;
@count_read_after-@count_read_before
10.00000000000000000000000000000000000000
diff --git a/mysql-test/main/bug46760-master.opt b/mysql-test/main/bug46760-master.opt
index 2d7be7fb9b1..5b4331aa7a6 100644
--- a/mysql-test/main/bug46760-master.opt
+++ b/mysql-test/main/bug46760-master.opt
@@ -1,2 +1 @@
--loose-innodb-lock-wait-timeout=2
---loose-innodb-file-per-table
diff --git a/mysql-test/main/column_compression_parts.test b/mysql-test/main/column_compression_parts.test
index 4c77a7308f7..8dfb304173b 100644
--- a/mysql-test/main/column_compression_parts.test
+++ b/mysql-test/main/column_compression_parts.test
@@ -141,7 +141,6 @@ CREATE TABLE t1 (a BLOB COMPRESSED)
CREATE TABLE t1 (a VARCHAR(200) COMPRESSED) PARTITION BY KEY(a) partitions 30;
ALTER TABLE t1 COALESCE PARTITION 20;
-#ALTER TABLE t1 ADD PARTITION (PARTITION pm TABLESPACE = `innodb_file_per_table`); --mdev MDEV-13584
ALTER TABLE t1 ADD PARTITION (PARTITION pm);
CREATE TABLE t2 like t1;
ALTER TABLE t2 REMOVE PARTITIONING;
diff --git a/mysql-test/main/comments.result b/mysql-test/main/comments.result
index c13eb510326..506c31b07b6 100644
--- a/mysql-test/main/comments.result
+++ b/mysql-test/main/comments.result
@@ -64,7 +64,10 @@ SELECT 1 /*!99999 +1*/;
SELECT 1 /*!100000 +1*/;
1 +1
2
-SELECT 1 /*!110000 +1*/;
+SELECT 1 /*!210000 +1*/;
+1
+1
+SELECT 1 /*!190000 +1*/;
1
1
#
@@ -86,7 +89,10 @@ SELECT 1 /*M!99999 +1*/;
SELECT 1 /*M!100000 +1*/;
1 +1
2
-SELECT 1 /*M!110000 +1*/;
+SELECT 1 /*M!210000 +1*/;
+1
+1
+SELECT 1 /*M!190000 +1*/;
1
1
select 1/*!2*/;
diff --git a/mysql-test/main/comments.test b/mysql-test/main/comments.test
index 6cf69635d1e..42cdc4255fd 100644
--- a/mysql-test/main/comments.test
+++ b/mysql-test/main/comments.test
@@ -41,7 +41,8 @@ SELECT 1 /*!50700 +1*/;
SELECT 1 /*!50999 +1*/;
SELECT 1 /*!99999 +1*/;
SELECT 1 /*!100000 +1*/;
-SELECT 1 /*!110000 +1*/;
+SELECT 1 /*!210000 +1*/;
+SELECT 1 /*!190000 +1*/;
--echo #
--echo # Testing that versions >= 5.7.x and < 10.0.0 are not ignored
@@ -52,7 +53,8 @@ SELECT 1 /*M!50700 +1*/;
SELECT 1 /*M!50999 +1*/;
SELECT 1 /*M!99999 +1*/;
SELECT 1 /*M!100000 +1*/;
-SELECT 1 /*M!110000 +1*/;
+SELECT 1 /*M!210000 +1*/;
+SELECT 1 /*M!190000 +1*/;
#
# Bug#25411 (trigger code truncated)
diff --git a/mysql-test/main/compress.result b/mysql-test/main/compress.result
index 24979346149..f5c85d3eb6f 100644
--- a/mysql-test/main/compress.result
+++ b/mysql-test/main/compress.result
@@ -6,6 +6,7 @@ select * from information_schema.session_status where variable_name= 'COMPRESSIO
VARIABLE_NAME VARIABLE_VALUE
COMPRESSION ON
drop table if exists t1,t2,t3,t4;
+set @@default_storage_engine="aria";
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
@@ -606,6 +607,9 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some tests with ORDER BY and limit
+#
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -1295,7 +1299,7 @@ companynr tinyint(2) unsigned zerofill NOT NULL default '00',
companyname char(30) NOT NULL default '',
PRIMARY KEY (companynr),
UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
+) ENGINE=aria MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
select STRAIGHT_JOIN t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
companynr companyname
00 Unknown
@@ -1385,6 +1389,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
@@ -1399,15 +1406,15 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 and companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1423,11 +1430,11 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/costs.result b/mysql-test/main/costs.result
new file mode 100644
index 00000000000..9d69207f956
--- /dev/null
+++ b/mysql-test/main/costs.result
@@ -0,0 +1,126 @@
+create table t1 (a int primary key, b int, c int, d int, e int, key ba (b,a), key bda (b,d,a), key cba (c,b,a), key cb (c,b), key d (d)) engine=aria;
+insert into t1 select seq,seq,seq,seq,seq from seq_1_to_10;
+insert into t1 values(20,2,2,2,2),(21,3,4,5,6);
+#
+# Get different scan costs
+#
+explain select sum(e) as "table_scan" from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 12
+Last_query_cost 0.012556
+explain select sum(a) as "index scan" from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL PRIMARY 4 NULL 12 Using index
+Last_query_cost 0.007441
+#
+# Range scans should be used if we don't examine all rows in the table
+#
+explain select count(a) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+Last_query_cost 0.000000
+explain select count(*) from t1 where a > 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 12 Using where; Using index
+Last_query_cost 0.002877
+explain select count(*) from t1 where a > 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 12 Using where; Using index
+Last_query_cost 0.002877
+explain select count(*) from t1 where a > 2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 11 Using where; Using index
+Last_query_cost 0.002747
+#
+# Shorter indexes are preferred over longer indexes
+#
+explain select sum(a+b) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL ba 9 NULL 12 Using index
+Last_query_cost 0.007441
+explain select count(*) from t1 where b between 5 and 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range ba,bda ba 5 NULL 6 Using where; Using index
+Last_query_cost 0.002097
+explain select sum(b+c) from t1 where b between 5 and 6 and c between 5 and 6;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range ba,bda,cba,cb cba 10 NULL 2 Using where; Using index
+Last_query_cost 0.001577
+# Cost of 'd' should be slightly smaller as key 'ba' is longer than 'd'
+explain select count(*) from t1 where b > 6;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range ba,bda ba 5 NULL 5 Using where; Using index
+Last_query_cost 0.001967
+explain select count(*) from t1 where d > 6;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range d d 5 NULL 5 Using where; Using index
+Last_query_cost 0.001967
+#
+# Check covering index usage
+#
+explain select a,b,c from t1 where a=b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL cba 14 NULL 12 Using where; Using index
+Last_query_cost 0.007441
+#
+# Prefer ref keys over ranges
+#
+explain select count(*) from t1 where b=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref ba,bda ba 5 const 2 Using index
+Last_query_cost 0.001141
+explain select count(*) from t1 where b=2 and c=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref ba,bda,cba,cb cba 10 const,const 2 Using index
+Last_query_cost 0.001141
+explain select count(*) from t1 where b=3 and c between 3 and 4;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range ba,bda,cba,cb cba 10 NULL 2 Using where; Using index
+Last_query_cost 0.001577
+#
+# Prefer eq keys over ref keys
+#
+explain select a,b,e from t1 where a=10 or a=11;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using index condition
+Last_query_cost 0.003126
+explain select a,b,e from t1 where d=10 or d=11;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range d d 5 NULL 2 Using index condition
+Last_query_cost 0.003291
+drop table t1;
+#
+# MDEV-30328 Assertion `avg_io_cost != 0.0 || index_cost.io + row_cost.io == 0' failed in
+# Cost_estimate::total_cost()
+#
+set @save=@@InnoDB.optimizer_disk_read_ratio;
+set global InnoDB.optimizer_disk_read_ratio=0;
+create table t1 (
+`l_orderkey` int(11) NOT NULL,
+`l_partkey` int(11) DEFAULT NULL,
+`l_suppkey` int(11) DEFAULT NULL,
+`l_linenumber` int(11) NOT NULL,
+`l_extra` int(11) NOT NULL,
+`l_quantity` double DEFAULT NULL,
+`l_extendedprice` double DEFAULT NULL,
+`l_discount` double DEFAULT NULL,
+`l_tax` double DEFAULT NULL,
+`l_returnflag` char(1) DEFAULT NULL,
+`l_linestatus` char(1) DEFAULT NULL,
+`l_shipDATE` date DEFAULT NULL,
+`l_commitDATE` date DEFAULT NULL,
+`l_receiptDATE` date DEFAULT NULL,
+`l_shipinstruct` char(25) DEFAULT NULL,
+`l_shipmode` char(10) DEFAULT NULL,
+`l_comment` varchar(44) DEFAULT NULL,
+PRIMARY KEY (`l_orderkey`),
+UNIQUE (`l_linenumber`),
+UNIQUE (`l_extra`) ,
+KEY `l_suppkey` (l_suppkey, l_partkey),
+KEY `long_suppkey` (l_partkey, l_suppkey, l_linenumber, l_extra) )
+ENGINE= InnoDB;
+explain select count(*) from test.t1 force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range l_suppkey l_suppkey 10 NULL 1 Using where; Using index
+drop table t1;
+set global InnoDB.optimizer_disk_read_ratio=@save;
diff --git a/mysql-test/main/costs.test b/mysql-test/main/costs.test
new file mode 100644
index 00000000000..bb933a200db
--- /dev/null
+++ b/mysql-test/main/costs.test
@@ -0,0 +1,116 @@
+#
+# Test of cost calculations. This test uses the Aria engine as the cost
+# calculations are stable for it.
+#
+# This file also includes MDEVs that show errors in cost calculation functions.
+#
+
+--source include/have_sequence.inc
+--source include/have_innodb.inc
+
+create table t1 (a int primary key, b int, c int, d int, e int, key ba (b,a), key bda (b,d,a), key cba (c,b,a), key cb (c,b), key d (d)) engine=aria;
+insert into t1 select seq,seq,seq,seq,seq from seq_1_to_10;
+insert into t1 values(20,2,2,2,2),(21,3,4,5,6);
+
+--echo #
+--echo # Get different scan costs
+--echo #
+
+explain select sum(e) as "table_scan" from t1;
+--source include/last_query_cost.inc
+explain select sum(a) as "index scan" from t1;
+--source include/last_query_cost.inc
+
+--echo #
+--echo # Range scans should be used if we don't examine all rows in the table
+--echo #
+explain select count(a) from t1;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where a > 0;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where a > 1;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where a > 2;
+--source include/last_query_cost.inc
+
+--echo #
+--echo # Shorter indexes are preferred over longer indexes
+--echo #
+explain select sum(a+b) from t1;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where b between 5 and 10;
+--source include/last_query_cost.inc
+explain select sum(b+c) from t1 where b between 5 and 6 and c between 5 and 6;
+--source include/last_query_cost.inc
+
+--echo # Cost of 'd' should be slightly smaller as key 'ba' is longer than 'd'
+explain select count(*) from t1 where b > 6;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where d > 6;
+--source include/last_query_cost.inc
+
+
+--echo #
+--echo # Check covering index usage
+--echo #
+explain select a,b,c from t1 where a=b;
+--source include/last_query_cost.inc
+
+--echo #
+--echo # Prefer ref keys over ranges
+--echo #
+
+explain select count(*) from t1 where b=2;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where b=2 and c=2;
+--source include/last_query_cost.inc
+explain select count(*) from t1 where b=3 and c between 3 and 4;
+--source include/last_query_cost.inc
+
+--echo #
+--echo # Prefer eq keys over ref keys
+--echo #
+
+explain select a,b,e from t1 where a=10 or a=11;
+--source include/last_query_cost.inc
+explain select a,b,e from t1 where d=10 or d=11;
+--source include/last_query_cost.inc
+
+drop table t1;
+
+--echo #
+--echo # MDEV-30328 Assertion `avg_io_cost != 0.0 || index_cost.io + row_cost.io == 0' failed in
+--echo # Cost_estimate::total_cost()
+--echo #
+
+set @save=@@InnoDB.optimizer_disk_read_ratio;
+set global InnoDB.optimizer_disk_read_ratio=0;
+
+create table t1 (
+ `l_orderkey` int(11) NOT NULL,
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL,
+ `l_extra` int(11) NOT NULL,
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`),
+ UNIQUE (`l_linenumber`),
+ UNIQUE (`l_extra`) ,
+ KEY `l_suppkey` (l_suppkey, l_partkey),
+ KEY `long_suppkey` (l_partkey, l_suppkey, l_linenumber, l_extra) )
+ ENGINE= InnoDB;
+explain select count(*) from test.t1 force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0;
+drop table t1;
+
+set global InnoDB.optimizer_disk_read_ratio=@save;
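The Last_query_cost lines in costs.result above are produced by the --source include/last_query_cost.inc calls in this test. That include is likewise not shown in the diff; a minimal sketch of what it presumably does is to print the optimizer's cost estimate for the statement that was just explained. The column-name suppression and exact commands below are assumptions, not the real include file:

# Hypothetical last_query_cost helper (sketch, not the real include/last_query_cost.inc).
# Print the server's Last_query_cost status value for the preceding EXPLAIN,
# without echoing the query or the column headers into the .result file.
--disable_query_log
--disable_column_names
show status like 'Last_query_cost';
--enable_column_names
--enable_query_log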
diff --git a/mysql-test/main/crash_commit_before-master.opt b/mysql-test/main/crash_commit_before-master.opt
index f464a1013d7..d1bf57fe820 100644
--- a/mysql-test/main/crash_commit_before-master.opt
+++ b/mysql-test/main/crash_commit_before-master.opt
@@ -1,3 +1,2 @@
--loose-skip-stack-trace --skip-core-file
--default-storage-engine=MyISAM
---loose-skip-innodb-file-per-table
diff --git a/mysql-test/main/cte_nonrecursive.result b/mysql-test/main/cte_nonrecursive.result
index f7871d4f929..5989abb7324 100644
--- a/mysql-test/main/cte_nonrecursive.result
+++ b/mysql-test/main/cte_nonrecursive.result
@@ -85,14 +85,14 @@ with t as (select a, count(*) from t1 where b >= 'c' group by a)
select * from t2,t where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
explain
select * from t2, (select a, count(*) from t1 where b >= 'c' group by a) as t
where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
# specivication of t contains having
with t as (select a, count(*) from t1 where b >= 'c'
@@ -150,16 +150,14 @@ explain
with t as (select a from t1 where a<5)
select * from t2 where c in (select a from t);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
-3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
explain
select * from t2
where c in (select a from (select a from t1 where a<5) as t);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
# materialized t is used in a subquery
with t as (select count(*) as c from t1 where b >= 'c' group by a)
select * from t2 where c in (select c from t);
@@ -175,7 +173,7 @@ with t as (select count(*) as c from t1 where b >= 'c' group by a)
select * from t2 where c in (select c from t);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 8 test.t2.c 2 Using where; FirstMatch(t2)
+1 PRIMARY <derived2> ref key0 key0 8 test.t2.c 1 Using where; FirstMatch(t2)
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
explain
select * from t2
@@ -183,7 +181,7 @@ where c in (select c from (select count(*) as c from t1
where b >= 'c' group by a) as t);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived3> ref key0 key0 8 test.t2.c 2 Using where; FirstMatch(t2)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 8 test.t2.c 1 Using where
3 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
# two references to t specified by a query
# selecting a field: both in main query
@@ -369,7 +367,7 @@ select c as a from t2 where c < 4)
select * from t2,t where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where
3 UNION t2 ALL NULL NULL NULL NULL 4 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -381,7 +379,7 @@ select c as a from t2 where c < 4) as t
where t2.c=t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where
3 UNION t2 ALL NULL NULL NULL NULL 4 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -597,7 +595,7 @@ explain
select * from v2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
# with clause in the specification of a view that whose definition
# table alias for a with table
@@ -2300,14 +2298,14 @@ WHERE col1 IN ( SELECT col FROM t );
SELECT * FROM tt;
col2
2018-10-01
-2018-10-01
2017-10-01
+2018-10-01
SELECT t4.col1
FROM tt, t4
WHERE t4.col2 = tt.col2 AND t4.col1 IN ( SELECT col FROM t );
col1
-8
4
+8
DROP TABLE t,tt;
CALL SP1();
col1
diff --git a/mysql-test/main/cte_recursive.result b/mysql-test/main/cte_recursive.result
index 7fb0d5801c1..ed416e7b355 100644
--- a/mysql-test/main/cte_recursive.result
+++ b/mysql-test/main/cte_recursive.result
@@ -689,17 +689,17 @@ from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w
where c.h_id = h.id and c.w_id= w.id;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY <derived3> ref key0 key0 5 c.h_id 2 100.00
-1 PRIMARY <derived3> ref key0 key0 5 c.w_id 2 100.00
+1 PRIMARY <derived3> ref key0 key0 5 c.h_id 1 100.00
+1 PRIMARY <derived3> ref key0 key0 5 c.w_id 1 100.00
3 DERIVED folks ALL NULL NULL NULL NULL 12 100.00 Using where
-4 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
-4 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join)
-5 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
-5 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join)
+4 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where
+4 RECURSIVE UNION <derived2> ref key0 key0 5 test.p.id 1 100.00
+5 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where
+5 RECURSIVE UNION <derived2> ref key0 key0 5 test.p.id 1 100.00
NULL UNION RESULT <union3,4,5> ALL NULL NULL NULL NULL NULL NULL
2 DERIVED <derived3> ALL NULL NULL NULL NULL 12 100.00 Using where
Warnings:
-Note 1003 with recursive ancestor_couple_ids(`h_id`,`w_id`) as (/* select#2 */ select `a`.`father` AS `h_id`,`a`.`mother` AS `w_id` from `coupled_ancestors` `a` where `a`.`father` is not null and `a`.`mother` is not null), coupled_ancestors(`id`,`name`,`dob`,`father`,`mother`) as (/* select#3 */ select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where `test`.`folks`.`name` = 'Me' union all /* select#4 */ select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `fa` where `test`.`p`.`id` = `fa`.`h_id` union all /* select#5 */ select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `ma` where `test`.`p`.`id` = `ma`.`w_id`)/* select#1 */ select `h`.`name` AS `name`,`h`.`dob` AS `dob`,`w`.`name` AS `name`,`w`.`dob` AS `dob` from `ancestor_couple_ids` `c` join `coupled_ancestors` `h` join `coupled_ancestors` `w` where `h`.`id` = `c`.`h_id` and `w`.`id` = `c`.`w_id`
+Note 1003 with recursive ancestor_couple_ids(`h_id`,`w_id`) as (/* select#2 */ select `a`.`father` AS `h_id`,`a`.`mother` AS `w_id` from `coupled_ancestors` `a` where `a`.`father` is not null and `a`.`mother` is not null), coupled_ancestors(`id`,`name`,`dob`,`father`,`mother`) as (/* select#3 */ select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where `test`.`folks`.`name` = 'Me' union all /* select#4 */ select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `fa` where `fa`.`h_id` = `test`.`p`.`id` union all /* select#5 */ select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `ma` where `ma`.`w_id` = `test`.`p`.`id`)/* select#1 */ select `h`.`name` AS `name`,`h`.`dob` AS `dob`,`w`.`name` AS `name`,`w`.`dob` AS `dob` from `ancestor_couple_ids` `c` join `coupled_ancestors` `h` join `coupled_ancestors` `w` where `h`.`id` = `c`.`h_id` and `w`.`id` = `c`.`w_id`
# simple mutual recursion
with recursive
ancestor_couple_ids(h_id, w_id)
@@ -1238,9 +1238,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 12
2 DERIVED folks ALL NULL NULL NULL NULL 12 Using where
3 RECURSIVE UNION p ALL PRIMARY NULL NULL NULL 12
-3 RECURSIVE UNION <derived2> ref key0 key0 5 test.p.id 2
+3 RECURSIVE UNION <derived2> ref key0 key0 5 test.p.id 1
4 RECURSIVE UNION p ALL PRIMARY NULL NULL NULL 12
-4 RECURSIVE UNION <derived2> ref key0 key0 5 test.p.id 2
+4 RECURSIVE UNION <derived2> ref key0 key0 5 test.p.id 1
NULL UNION RESULT <union2,3,4> ALL NULL NULL NULL NULL NULL
with recursive
ancestors
@@ -1339,12 +1339,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 24,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -1355,12 +1358,15 @@ EXPLAIN
{
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "folks",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "folks.`name` = 'Me2'"
}
@@ -1372,12 +1378,15 @@ EXPLAIN
"query_block": {
"select_id": 6,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "prev_gen.`id` < 345",
"materialized": {
@@ -1389,12 +1398,15 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "folks",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "folks.`name` = 'Me'"
}
@@ -1406,13 +1418,16 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "folks",
"access_type": "ALL",
"possible_keys": ["PRIMARY"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1421,7 +1436,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 12,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -1446,12 +1463,15 @@ EXPLAIN
"query_block": {
"select_id": 5,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 24,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "ancestors.`id` < 234"
}
@@ -1499,12 +1519,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -1515,12 +1538,15 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "v",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.`name` = 'Me' and v.father is not null and v.mother is not null"
}
@@ -1534,7 +1560,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["id"],
"ref": ["test.v.father"],
+ "loops": 12,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1547,7 +1575,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["id"],
"ref": ["test.v.mother"],
+ "loops": 12,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1558,12 +1588,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "a.father is not null and a.mother is not null"
}
@@ -1577,7 +1610,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["id"],
"ref": ["a.father"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1590,7 +1625,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["id"],
"ref": ["a.mother"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1824,12 +1861,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -1840,12 +1880,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1856,12 +1899,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a < 1000"
}
@@ -2460,6 +2506,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -2467,9 +2514,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -2494,6 +2543,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 10,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -2501,9 +2551,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 10,
"rows": 2,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3158,7 +3210,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 16 100.00
2 DERIVED a ALL NULL NULL NULL NULL 16 100.00 Using where
3 RECURSIVE UNION b ALL NULL NULL NULL NULL 16 100.00 Using where
-3 RECURSIVE UNION <derived2> ref key0 key0 35 test.b.departure 2 100.00
+3 RECURSIVE UNION <derived2> ref key0 key0 35 test.b.departure 1 100.00
4 DEPENDENT SUBQUERY <derived2> ALL NULL NULL NULL NULL 16 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -3261,9 +3313,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 15 Using filesort
2 DERIVED t2 ALL NULL NULL NULL NULL 15 Using where
3 RECURSIVE UNION t2 ALL NULL NULL NULL NULL 15 Using where
-3 RECURSIVE UNION <derived2> ref key0 key0 5 test.t2.id 2
+3 RECURSIVE UNION <derived2> ref key0 key0 5 test.t2.id 1
4 RECURSIVE UNION t2 ALL NULL NULL NULL NULL 15 Using where
-4 RECURSIVE UNION <derived2> ref key0 key0 5 test.t2.id 2
+4 RECURSIVE UNION <derived2> ref key0 key0 5 test.t2.id 1
NULL UNION RESULT <union2,3,4> ALL NULL NULL NULL NULL NULL
DROP TABLE t1,t2;
set tmp_memory_table_size=default;
@@ -3854,8 +3906,8 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
1 PRIMARY <derived3> ref key0 key0 23 test.t1.a1 1 FirstMatch(t1)
3 DERIVED t2 const PRIMARY PRIMARY 22 const 1 Using index
-4 RECURSIVE UNION <derived3> ALL NULL NULL NULL NULL 2 Using where
-4 RECURSIVE UNION tt2 ref b1 b1 23 cte.a2 2
+4 RECURSIVE UNION tt2 ALL b1 NULL NULL NULL 14 Using where
+4 RECURSIVE UNION <derived3> ref key0 key0 23 test.tt2.b1 1
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
analyze format=json select fv
from (select t1.a1, f1(t1.a2) fv from t1) dt
@@ -3869,6 +3921,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -3876,9 +3929,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3895,9 +3950,11 @@ ANALYZE
"key_length": "23",
"used_key_parts": ["a2"],
"ref": ["test.t1.a1"],
+ "loops": 3,
"r_loops": 3,
"rows": 1,
"r_rows": 0.333333333,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3941,35 +3998,41 @@ ANALYZE
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
{
"table": {
- "table_name": "<derived3>",
+ "table_name": "tt2",
"access_type": "ALL",
+ "possible_keys": ["b1"],
+ "loops": 1,
"r_loops": 1,
- "rows": 2,
- "r_rows": 1,
+ "rows": 14,
+ "r_rows": 14,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100,
- "attached_condition": "cte.a2 is not null"
+ "attached_condition": "tt2.b1 is not null"
}
},
{
"table": {
- "table_name": "tt2",
+ "table_name": "<derived3>",
"access_type": "ref",
- "possible_keys": ["b1"],
- "key": "b1",
+ "possible_keys": ["key0"],
+ "key": "key0",
"key_length": "23",
- "used_key_parts": ["b1"],
- "ref": ["cte.a2"],
- "r_loops": 1,
- "rows": 2,
- "r_rows": 1,
+ "used_key_parts": ["a2"],
+ "ref": ["test.tt2.b1"],
+ "loops": 14,
+ "r_loops": 14,
+ "rows": 1,
+ "r_rows": 0.071428571,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4032,8 +4095,8 @@ FROM cte JOIN t3 ON t3.tm BETWEEN cte.st AND cte.fn)
SELECT t1.* FROM t1 JOIN cte2 USING (YEAR) JOIN cte3 USING (YEAR);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1 100.00
-1 PRIMARY <derived5> ref key0 key0 5 const 0 0.00
-1 PRIMARY <derived4> ref key0 key0 5 const 0 0.00
+1 PRIMARY <derived5> ref key0 key0 5 const 0 100.00
+1 PRIMARY <derived4> ref key0 key0 5 const 0 100.00
2 DERIVED t1 system NULL NULL NULL NULL 1 100.00
3 RECURSIVE UNION t1 system NULL NULL NULL NULL 1 100.00
3 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2 100.00 Using where
@@ -4105,7 +4168,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
2 DERIVED s ALL NULL NULL NULL NULL 4
3 RECURSIVE UNION t1 ALL NULL NULL NULL NULL 4 Using where
-3 RECURSIVE UNION <derived2> ref key0 key0 9 test.t1.c 2
+3 RECURSIVE UNION <derived2> ref key0 key0 9 test.t1.c 1
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
4 UNION <derived2> ALL NULL NULL NULL NULL 4
with recursive r_cte as
@@ -4144,6 +4207,7 @@ ANALYZE
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4151,9 +4215,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4169,6 +4235,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4176,9 +4243,11 @@ ANALYZE
"table": {
"table_name": "s",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4192,6 +4261,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4199,9 +4269,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4218,9 +4290,11 @@ ANALYZE
"key_length": "9",
"used_key_parts": ["a"],
"ref": ["test.t1.c"],
+ "loops": 4,
"r_loops": 4,
- "rows": 2,
+ "rows": 1,
"r_rows": 0.5,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4299,6 +4373,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4306,9 +4381,11 @@ ANALYZE
"table": {
"table_name": "tt",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4467,9 +4544,9 @@ NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL
3 DERIVED v ALL NULL NULL NULL NULL 12 Using where
3 DERIVED h ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
3 DERIVED w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (incremental, BNL join)
-2 RECURSIVE UNION <derived4> ALL NULL NULL NULL NULL 2
-2 RECURSIVE UNION h ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
-2 RECURSIVE UNION w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (incremental, BNL join)
+2 RECURSIVE UNION h ALL NULL NULL NULL NULL 12 Using where
+2 RECURSIVE UNION <derived4> ref key0 key0 5 test.h.id 1
+2 RECURSIVE UNION w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
NULL UNION RESULT <union3,2> ALL NULL NULL NULL NULL NULL
prepare stmt from "with recursive
ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
@@ -4565,9 +4642,9 @@ id select_type table type possible_keys key key_len ref rows Extra
4 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2
5 RECURSIVE UNION <derived2> ALL NULL NULL NULL NULL 2
NULL UNION RESULT <union3,4,5> ALL NULL NULL NULL NULL NULL
-2 DERIVED h ALL NULL NULL NULL NULL 12
-2 DERIVED w ALL NULL NULL NULL NULL 12 Using join buffer (flat, BNL join)
-2 DERIVED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using join buffer (incremental, BNL join)
+2 DERIVED h ALL NULL NULL NULL NULL 12 Using where
+2 DERIVED <derived3> ref key0 key0 5 test.h.id 1
+2 DERIVED w ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
prepare stmt from "with recursive
ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
w_id, w_name, w_dob, w_father, w_mother)
diff --git a/mysql-test/main/cte_recursive.test b/mysql-test/main/cte_recursive.test
index ebea3b96754..4a21c12a9f8 100644
--- a/mysql-test/main/cte_recursive.test
+++ b/mysql-test/main/cte_recursive.test
@@ -1110,6 +1110,7 @@ as
)
select ancestors.name, ancestors.dob from ancestors;
+--source include/explain-no-costs.inc
explain FORMAT=JSON
with recursive
prev_gen
@@ -1139,6 +1140,7 @@ as
select ancestors.name, ancestors.dob from ancestors;
--echo #
+--source include/explain-no-costs.inc
explain format=json
with recursive
ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
@@ -1343,6 +1345,7 @@ drop table folks;
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+--source include/explain-no-costs.inc
explain format=json
with recursive t as (select a from t1 union select a+10 from t where a < 1000)
select * from t;
diff --git a/mysql-test/main/ctype_binary.result b/mysql-test/main/ctype_binary.result
index 24fc961e17d..1dd5f93ad17 100644
--- a/mysql-test/main/ctype_binary.result
+++ b/mysql-test/main/ctype_binary.result
@@ -2763,6 +2763,7 @@ id INT(11) DEFAULT NULL,
date_column DATE DEFAULT NULL,
KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
+INSERT INTO t1 VALUES (3,'2012-09-01'),(4,'2012-10-01'),(5,'2012-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
diff --git a/mysql-test/main/ctype_collate.result b/mysql-test/main/ctype_collate.result
index 1ae9f295042..29d27fd608b 100644
--- a/mysql-test/main/ctype_collate.result
+++ b/mysql-test/main/ctype_collate.result
@@ -748,7 +748,7 @@ hex(b)
explain
select hex(b) from t1 where b<'zzz' order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using filesort
+1 SIMPLE t1 range PRIMARY PRIMARY 34 NULL 4 Using where; Using filesort
select hex(b) from t1 where b<'zzz' order by b;
hex(b)
00
diff --git a/mysql-test/main/ctype_cp1251.result b/mysql-test/main/ctype_cp1251.result
index a341d9ce471..475058dc2bb 100644
--- a/mysql-test/main/ctype_cp1251.result
+++ b/mysql-test/main/ctype_cp1251.result
@@ -3175,6 +3175,7 @@ id INT(11) DEFAULT NULL,
date_column DATE DEFAULT NULL,
KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
+INSERT INTO t1 VALUES (3,'2012-09-01'),(4,'2012-10-01'),(5,'2012-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
diff --git a/mysql-test/main/ctype_gbk.result b/mysql-test/main/ctype_gbk.result
index 79dede6d089..9d7783b07bf 100644
--- a/mysql-test/main/ctype_gbk.result
+++ b/mysql-test/main/ctype_gbk.result
@@ -680,11 +680,7 @@ b MEDIUMTEXT CHARACTER SET big5);
INSERT INTO t1 VALUES
(REPEAT(0x1125,200000), REPEAT(0x1125,200000)), ('', ''), ('', '');
SELECT a FROM t1 GROUP BY 1 LIMIT 1 INTO @nullll;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT b FROM t1 GROUP BY 1 LIMIT 1 INTO @nullll;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
DROP TABLES t1;
End of 5.0 tests
#
diff --git a/mysql-test/main/ctype_latin1.result b/mysql-test/main/ctype_latin1.result
index 5be12e91b68..4088530fc0f 100644
--- a/mysql-test/main/ctype_latin1.result
+++ b/mysql-test/main/ctype_latin1.result
@@ -3484,6 +3484,7 @@ id INT(11) DEFAULT NULL,
date_column DATE DEFAULT NULL,
KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
+INSERT INTO t1 VALUES (3,'2012-09-01'),(4,'2012-10-01'),(5,'2012-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result
index ce0d695797b..987f1b704fd 100644
--- a/mysql-test/main/ctype_ucs.result
+++ b/mysql-test/main/ctype_ucs.result
@@ -207,8 +207,6 @@ DROP TABLE t1;
# Problem # 1 (original report): wrong parsing of ucs2 data
SET character_set_connection=ucs2;
SELECT '00' UNION SELECT '10' INTO OUTFILE 'tmpp.txt';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE TABLE t1(a INT);
LOAD DATA INFILE 'tmpp.txt' INTO TABLE t1 CHARACTER SET ucs2
(@b) SET a=REVERSE(@b);
@@ -220,8 +218,6 @@ a
DROP TABLE t1;
# Problem # 2 : if you write and read ucs2 data to a file they're lost
SELECT '00' UNION SELECT '10' INTO OUTFILE 'tmpp2.txt' CHARACTER SET ucs2;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE TABLE t1(a INT);
LOAD DATA INFILE 'tmpp2.txt' INTO TABLE t1 CHARACTER SET ucs2
(@b) SET a=REVERSE(@b);
@@ -4368,6 +4364,7 @@ id INT(11) DEFAULT NULL,
date_column DATE DEFAULT NULL,
KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
+INSERT INTO t1 VALUES (3,'2012-09-01'),(4,'2012-10-01'),(5,'2012-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
diff --git a/mysql-test/main/ctype_upgrade.test b/mysql-test/main/ctype_upgrade.test
index fee962e7ceb..f2fdc36554e 100644
--- a/mysql-test/main/ctype_upgrade.test
+++ b/mysql-test/main/ctype_upgrade.test
@@ -197,7 +197,7 @@ SELECT GROUP_CONCAT(a ORDER BY BINARY a) FROM maria100004_xxx_croatian_ci GROUP
SHOW CREATE TABLE mysql050614_xxx_croatian_ci;
SELECT GROUP_CONCAT(a ORDER BY BINARY a) FROM mysql050614_xxx_croatian_ci GROUP BY a;
-remove_file $MYSQLD_DATADIR/mysql_upgrade_info;
+remove_file $MYSQLD_DATADIR/mariadb_upgrade_info;
DROP TABLE maria050313_ucs2_croatian_ci_def;
DROP TABLE maria050313_utf8_croatian_ci;
DROP TABLE maria050533_xxx_croatian_ci;
diff --git a/mysql-test/main/ctype_utf8.result b/mysql-test/main/ctype_utf8.result
index 42ab1decc4e..7a40c361e81 100644
--- a/mysql-test/main/ctype_utf8.result
+++ b/mysql-test/main/ctype_utf8.result
@@ -5235,6 +5235,7 @@ id INT(11) DEFAULT NULL,
date_column DATE DEFAULT NULL,
KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
+INSERT INTO t1 VALUES (3,'2012-09-01'),(4,'2012-10-01'),(5,'2012-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
diff --git a/mysql-test/main/custom_aggregates_i_s.result b/mysql-test/main/custom_aggregates_i_s.result
index cb98aee389d..2a19e9f7cab 100644
--- a/mysql-test/main/custom_aggregates_i_s.result
+++ b/mysql-test/main/custom_aggregates_i_s.result
@@ -39,7 +39,7 @@ explain
select * from t1, (select f1(sal) as a from t1 where id>= 1) q where q.a=t1.sal;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.sal 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.sal 1
2 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where
show status like "%custom_aggregate%";
Variable_name Value
diff --git a/mysql-test/main/delete.result b/mysql-test/main/delete.result
index 7a9963abc71..900c14c5578 100644
--- a/mysql-test/main/delete.result
+++ b/mysql-test/main/delete.result
@@ -92,6 +92,9 @@ select * from t1;
a b
1 apple
drop table t1;
+#
+# IGNORE option
+#
create table t11 (a int NOT NULL, b int, primary key (a));
create table t12 (a int NOT NULL, b int, primary key (a));
create table t2 (a int NOT NULL, b int, primary key (a));
@@ -125,10 +128,14 @@ a b
33 10
0 11
2 12
+explain delete ignore t11.*, t12.* from t11,t12 where t11.a = t12.a and t11.b <> (select b from t2 where t11.a < t2.a);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t12 ALL PRIMARY NULL NULL NULL 3
+1 PRIMARY t11 eq_ref PRIMARY PRIMARY 4 test.t12.a 1 Using where
+2 DEPENDENT SUBQUERY t2 ALL PRIMARY NULL NULL NULL 3 Using where
delete ignore t11.*, t12.* from t11,t12 where t11.a = t12.a and t11.b <> (select b from t2 where t11.a < t2.a);
Warnings:
Warning 1242 Subquery returns more than 1 row
-Warning 1242 Subquery returns more than 1 row
select * from t11;
a b
0 10
diff --git a/mysql-test/main/delete.test b/mysql-test/main/delete.test
index 6d898ec769d..733384193e5 100644
--- a/mysql-test/main/delete.test
+++ b/mysql-test/main/delete.test
@@ -106,9 +106,9 @@ delete t1 from t1, t1 as t2 where t1.b = t2.b and t1.a > t2.a;
select * from t1;
drop table t1;
-#
-# IGNORE option
-#
+--echo #
+--echo # IGNORE option
+--echo #
create table t11 (a int NOT NULL, b int, primary key (a));
create table t12 (a int NOT NULL, b int, primary key (a));
create table t2 (a int NOT NULL, b int, primary key (a));
@@ -122,6 +122,7 @@ select * from t2;
delete t11.*, t12.* from t11,t12 where t11.a = t12.a and t11.b <> (select b from t2 where t11.a < t2.a);
select * from t11;
select * from t12;
+explain delete ignore t11.*, t12.* from t11,t12 where t11.a = t12.a and t11.b <> (select b from t2 where t11.a < t2.a);
delete ignore t11.*, t12.* from t11,t12 where t11.a = t12.a and t11.b <> (select b from t2 where t11.a < t2.a);
select * from t11;
select * from t12;
diff --git a/mysql-test/main/delete_innodb.result b/mysql-test/main/delete_innodb.result
index b9f4c8bdaf5..ae9b415152f 100644
--- a/mysql-test/main/delete_innodb.result
+++ b/mysql-test/main/delete_innodb.result
@@ -17,7 +17,7 @@ a
b
EXPLAIN DELETE b FROM t1 AS a JOIN t1 AS b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE a index NULL PRIMARY 255 NULL 3 Using index
+1 SIMPLE a ALL NULL NULL NULL NULL 3
1 SIMPLE b ALL NULL NULL NULL NULL 3
DELETE b FROM t1 AS a JOIN t1 AS b;
SELECT * FROM t1;
diff --git a/mysql-test/main/derived.result b/mysql-test/main/derived.result
index 268b111cd77..112a72a2bf6 100644
--- a/mysql-test/main/derived.result
+++ b/mysql-test/main/derived.result
@@ -370,6 +370,15 @@ a
2
3
3
+set @save2_derived_optimizer_switch_bug=@@optimizer_switch;
+set @@optimizer_switch=default;
+select * from (select * from t1 union distinct select * from t2 union all select * from t3) X;
+a
+1
+2
+3
+3
+set @@optimizer_switch=@save2_derived_optimizer_switch_bug;
drop table t1, t2, t3;
create table t1 (a int);
create table t2 (a int);
@@ -639,7 +648,7 @@ SELECT f3 FROM t2 HAVING f3 >= 8
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> system NULL NULL NULL NULL 1 100.00
1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 const 1 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(<subquery4>); Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch(<subquery4>); Using join buffer (flat, BNL join)
4 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
2 DERIVED t1 system NULL NULL NULL NULL 1 100.00
Warnings:
@@ -1283,14 +1292,14 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f
analyze select * from t1 , ((select distinct t2.a, t2.b from t2 order by c))q where t1.a=q.a;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 1.00 100.00 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 1.00 100.00 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using temporary; Using filesort
# multiple selects in derived table
# NO UNION ALL
analyze select * from t1 , ( (select t2.a from t2 order by c) union (select t2.a from t2 order by c))q where t1.a=q.a;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 1.00 100.00 100.00
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 5 test.t1.a 1 1.00 100.00 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
3 UNION t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL 6.00 NULL NULL
@@ -1306,7 +1315,7 @@ a a
analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 0.50 100.00 100.00
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 5 test.t1.a 1 0.50 100.00 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
3 UNION t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
4 EXCEPT t3 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
@@ -1316,6 +1325,23 @@ a a
3 3
4 4
6 6
+analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except ALL (select t3.a from t3 order by b))q where t1.a=q.a;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 1.17 100.00 100.00
+2 DERIVED t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+3 UNION t2 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+4 EXCEPT t3 ALL NULL NULL NULL NULL 6 6.00 100.00 100.00
+NULL UNIT RESULT <unit2,3,4> ALL NULL NULL NULL NULL NULL 7.00 NULL NULL
+select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except ALL (select t3.a from t3 order by b))q where t1.a=q.a;
+a a
+3 3
+3 3
+4 4
+4 4
+5 5
+6 6
+6 6
drop table t1,t2,t3;
#
# MDEV-16549: Server crashes in Item_field::fix_fields on query with
@@ -1333,3 +1359,122 @@ DROP TABLE t1;
#
# End of 10.3 tests
#
+#
+# Test of "Derived tables and union can now create distinct keys"
+#
+create table t1 (a int);
+insert into t1 values (100),(100),(100),(100),(100),(100),(100),(100),(100),(100);
+create table duplicates_tbl (a int);
+insert into duplicates_tbl select seq/100 from seq_1_to_10000;
+explain
+select
+t1.a IN ( SELECT COUNT(*)
+from (select a
+from duplicates_tbl
+limit 10000
+) T
+where T.a=5
+) as 'A'
+from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 10
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 10000 Using where
+3 DERIVED duplicates_tbl ALL NULL NULL NULL NULL 10000
+select
+t1.a IN ( SELECT COUNT(*)
+from (select a
+from duplicates_tbl
+limit 10000
+) T
+where T.a=5
+) as 'A'
+from t1;
+A
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+explain
+select
+t1.a = all ( SELECT COUNT(*)
+from (select a
+from duplicates_tbl
+limit 10000
+) T
+where T.a=5
+) as 'A'
+from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 10
+2 DEPENDENT SUBQUERY <derived3> ALL NULL NULL NULL NULL 10000 Using where
+3 DERIVED duplicates_tbl ALL NULL NULL NULL NULL 10000
+select
+t1.a = all ( SELECT COUNT(*)
+from (select a
+from duplicates_tbl
+limit 10000
+) T
+where T.a=5
+) as 'A'
+from t1;
+A
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+drop table t1, duplicates_tbl;
+#
+# MDEV-30310
+# Assertion failure in best_access_path upon IN exceeding
+# IN_PREDICATE_CONVERSION_THRESHOLD, derived_with_keys=off
+#
+CREATE TABLE t1 (l_orderkey int);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (o_orderkey int);
+INSERT INTO t2 VALUES (3),(4);
+SET IN_PREDICATE_CONVERSION_THRESHOLD= 2;
+SET OPTIMIZER_SWITCH='derived_with_keys=on';
+SELECT * FROM t1 JOIN t2 ON (l_orderkey = o_orderkey) WHERE l_orderkey IN (1, 2, 3);
+l_orderkey o_orderkey
+SET OPTIMIZER_SWITCH='derived_with_keys=off';
+SELECT * FROM t1 JOIN t2 ON (l_orderkey = o_orderkey) WHERE l_orderkey IN (1, 2, 3);
+l_orderkey o_orderkey
+SET @@IN_PREDICATE_CONVERSION_THRESHOLD=@@global.IN_PREDICATE_CONVERSION_THRESHOLD;
+SET @@OPTIMIZER_SWITCH=@@global.OPTIMIZER_SWITCH;
+DROP TABLE t1, t2;
+#
+# MDEV-30540 Wrong result with IN list length reaching
+# IN_PREDICATE_CONVERSION_THRESHOLD
+#
+CREATE TABLE t1 (a INT PRIMARY KEY);
+INSERT INTO t1 SELECT seq FROM seq_1_to_30;
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+SET IN_PREDICATE_CONVERSION_THRESHOLD=4;
+SELECT a FROM t1 WHERE a IN ( 1, 1, 2, 194 );
+a
+1
+2
+SET IN_PREDICATE_CONVERSION_THRESHOLD=100;
+SELECT a FROM t1 WHERE a IN ( 1, 1, 2, 194 );
+a
+1
+2
+drop table t1;
+#
+# End of 11.0 tests
+#
diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test
index b256637ce26..e5f01e15821 100644
--- a/mysql-test/main/derived.test
+++ b/mysql-test/main/derived.test
@@ -260,6 +260,10 @@ insert into t2 values(2),(2);
insert into t3 values(3),(3);
select * from t1 union distinct select * from t2 union all select * from t3;
select * from (select * from t1 union distinct select * from t2 union all select * from t3) X;
+set @save2_derived_optimizer_switch_bug=@@optimizer_switch;
+set @@optimizer_switch=default;
+select * from (select * from t1 union distinct select * from t2 union all select * from t3) X;
+set @@optimizer_switch=@save2_derived_optimizer_switch_bug;
drop table t1, t2, t3;
#
@@ -1126,8 +1130,12 @@ analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select
select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except(select t3.a from t3 order by b))q where t1.a=q.a;
-drop table t1,t2,t3;
+analyze select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except ALL (select t3.a from t3 order by b))q where t1.a=q.a;
+select * from t1 , ( (select t2.a from t2 order by c) union all (select t2.a from t2 order by c) except ALL (select t3.a from t3 order by b))q where t1.a=q.a;
+
+
+drop table t1,t2,t3;
--echo #
--echo # MDEV-16549: Server crashes in Item_field::fix_fields on query with
@@ -1147,3 +1155,94 @@ DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # Test of "Derived tables and union can now create distinct keys"
+--echo #
+
+create table t1 (a int);
+insert into t1 values (100),(100),(100),(100),(100),(100),(100),(100),(100),(100);
+
+create table duplicates_tbl (a int);
+insert into duplicates_tbl select seq/100 from seq_1_to_10000;
+
+explain
+select
+ t1.a IN ( SELECT COUNT(*)
+ from (select a
+ from duplicates_tbl
+ limit 10000
+ ) T
+ where T.a=5
+ ) as 'A'
+from t1;
+
+select
+ t1.a IN ( SELECT COUNT(*)
+ from (select a
+ from duplicates_tbl
+ limit 10000
+ ) T
+ where T.a=5
+ ) as 'A'
+from t1;
+
+explain
+select
+ t1.a = all ( SELECT COUNT(*)
+ from (select a
+ from duplicates_tbl
+ limit 10000
+ ) T
+ where T.a=5
+ ) as 'A'
+from t1;
+
+select
+ t1.a = all ( SELECT COUNT(*)
+ from (select a
+ from duplicates_tbl
+ limit 10000
+ ) T
+ where T.a=5
+ ) as 'A'
+from t1;
+
+drop table t1, duplicates_tbl;
+
+--echo #
+--echo # MDEV-30310
+--echo # Assertion failure in best_access_path upon IN exceeding
+--echo # IN_PREDICATE_CONVERSION_THRESHOLD, derived_with_keys=off
+--echo #
+
+CREATE TABLE t1 (l_orderkey int);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (o_orderkey int);
+INSERT INTO t2 VALUES (3),(4);
+SET IN_PREDICATE_CONVERSION_THRESHOLD= 2;
+SET OPTIMIZER_SWITCH='derived_with_keys=on';
+SELECT * FROM t1 JOIN t2 ON (l_orderkey = o_orderkey) WHERE l_orderkey IN (1, 2, 3);
+SET OPTIMIZER_SWITCH='derived_with_keys=off';
+SELECT * FROM t1 JOIN t2 ON (l_orderkey = o_orderkey) WHERE l_orderkey IN (1, 2, 3);
+SET @@IN_PREDICATE_CONVERSION_THRESHOLD=@@global.IN_PREDICATE_CONVERSION_THRESHOLD;
+SET @@OPTIMIZER_SWITCH=@@global.OPTIMIZER_SWITCH;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-30540 Wrong result with IN list length reaching
+--echo # IN_PREDICATE_CONVERSION_THRESHOLD
+--echo #
+
+CREATE TABLE t1 (a INT PRIMARY KEY);
+INSERT INTO t1 SELECT seq FROM seq_1_to_30;
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+SET IN_PREDICATE_CONVERSION_THRESHOLD=4;
+SELECT a FROM t1 WHERE a IN ( 1, 1, 2, 194 );
+SET IN_PREDICATE_CONVERSION_THRESHOLD=100;
+SELECT a FROM t1 WHERE a IN ( 1, 1, 2, 194 );
+drop table t1;
+
+--echo #
+--echo # End of 11.0 tests
+--echo #
diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result
index 8c02fb1a4f7..da46ca3ca26 100644
--- a/mysql-test/main/derived_cond_pushdown.result
+++ b/mysql-test/main/derived_cond_pushdown.result
@@ -122,12 +122,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -136,7 +139,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 214"
},
@@ -147,6 +152,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 214",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -156,7 +162,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -208,12 +216,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -227,12 +238,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 300",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -242,7 +256,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -297,12 +313,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -311,7 +330,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 400 or v1.max_c < 135"
},
@@ -322,6 +343,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (max_c > 400 or max_c < 135)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -331,7 +353,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -375,12 +399,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -389,7 +416,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 300 or v1.max_c < 135"
},
@@ -400,6 +429,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (max_c > 300 or max_c < 135)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -409,7 +439,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -442,12 +474,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -456,7 +491,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 6"
},
@@ -467,6 +504,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -476,7 +514,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 6"
}
@@ -519,12 +559,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -533,7 +576,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.b > 25"
},
@@ -544,6 +589,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -553,7 +599,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b > 25"
}
@@ -617,12 +665,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -631,7 +682,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 7 or v1.a < 2"
},
@@ -642,6 +695,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -651,7 +705,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 7 or t1.a < 2"
}
@@ -708,12 +764,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -722,7 +781,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.a > 7 or v2.a > 5"
},
@@ -733,6 +794,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -742,7 +804,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and (t1.a > 7 or t1.a > 5)"
}
@@ -785,12 +849,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -799,7 +866,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 4 or v1.a < 2"
},
@@ -810,6 +879,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -819,7 +889,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 4 or t1.a < 2"
}
@@ -855,12 +927,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -869,7 +944,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 2 and v1.max_c > 400"
},
@@ -880,6 +957,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 400",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -889,7 +967,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2"
}
@@ -928,7 +1008,7 @@ explain select * from v_double as v,t2_double as t where
(v.a=t.a) and (v.avg_a>0.45) and (v.b>10);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t.a 1 Using where
2 DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_double as v,t2_double as t where
(v.a=t.a) and (v.avg_a>0.45) and (v.b>10);
@@ -936,12 +1016,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a is not null"
}
@@ -955,12 +1038,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.avg_a > 0.45 and v.b > 10",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "avg_a < 22.333 and avg_a > 0.45",
"filesort": {
"sort_key": "t1_double.b, t1_double.c",
@@ -970,7 +1056,9 @@ EXPLAIN
"table": {
"table_name": "t1_double",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_double.b > 12.2 and t1_double.b > 10"
}
@@ -997,7 +1085,7 @@ explain select * from v_decimal as v,t2_decimal as t where
(v.a=t.a) and (v.avg_c>15) and (v.b>1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 3 test.t.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 3 test.t.a 1 Using where
2 DERIVED t1_decimal ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_decimal as v,t2_decimal as t where
(v.a=t.a) and (v.avg_c>15) and (v.b>1);
@@ -1005,12 +1093,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a is not null"
}
@@ -1024,12 +1115,15 @@ EXPLAIN
"key_length": "3",
"used_key_parts": ["a"],
"ref": ["test.t.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.avg_c > 15 and v.b > 1",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "avg_c > 12 and avg_c > 15",
"filesort": {
"sort_key": "t1_decimal.a, t1_decimal.b",
@@ -1039,7 +1133,9 @@ EXPLAIN
"table": {
"table_name": "t1_decimal",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_decimal.b > 1"
}
@@ -1097,12 +1193,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1111,7 +1210,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 7 and v1.max_c > 300 or v1.a < 4 and v1.max_c < 500"
},
@@ -1122,6 +1223,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a > 7 and max_c > 300 or t1.a < 4 and max_c < 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1131,7 +1233,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 7 or t1.a < 4"
}
@@ -1196,12 +1300,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1210,7 +1317,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 2 and v1.max_c > 120 or v1.a > 7"
},
@@ -1221,6 +1330,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a < 2 and max_c > 120 or t1.a > 7)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1230,7 +1340,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.a > 7"
}
@@ -1284,12 +1396,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1298,7 +1413,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 2 and v1.max_c > 120 or v1.a > 7"
},
@@ -1309,6 +1426,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a < 2 and max_c > 120 or t1.a > 7)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1318,7 +1436,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.a > 7"
}
@@ -1361,12 +1481,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1375,7 +1498,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 2 and v1.max_c < 200 or v1.a > 4"
},
@@ -1386,6 +1511,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a < 2 and max_c < 200 or t1.a > 4 and max_c < 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1395,7 +1521,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.a > 4"
}
@@ -1448,12 +1576,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1462,7 +1593,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 400 or v1.max_c < 135"
},
@@ -1473,6 +1606,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (max_c > 400 or max_c < 135)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1482,7 +1616,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1501,12 +1637,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1515,7 +1654,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 400 or v1.max_c < 135"
},
@@ -1526,6 +1667,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (max_c > 400 or max_c < 135)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1535,7 +1677,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1570,12 +1714,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 1 and t2.b is not null"
}
@@ -1589,12 +1736,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.b",
@@ -1604,7 +1754,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -1635,12 +1787,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.d is not null"
}
@@ -1654,12 +1809,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["max_c"],
"ref": ["test.t2.d"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 5 and v1.max_c = t2.d",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.b",
@@ -1669,7 +1827,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 5"
}
@@ -1707,12 +1867,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 5 and t2.a is not null"
}
@@ -1726,11 +1889,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1740,7 +1906,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 5"
}
@@ -1769,12 +1937,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null and t2.a is not null"
}
@@ -1788,11 +1959,14 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1802,7 +1976,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t1.a"
}
@@ -1836,12 +2012,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c > 150 and t2.c is not null"
}
@@ -1855,11 +2034,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["max_c"],
"ref": ["test.t2.c"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 150",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -1869,7 +2051,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1899,12 +2083,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 3"
}
@@ -1914,7 +2101,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 3 and v1.b = 3"
},
@@ -1924,13 +2113,16 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 3 and t1.b = 3"
}
@@ -1959,12 +2151,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 2"
}
@@ -1974,7 +2169,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1 and v1.b = 21"
},
@@ -1984,13 +2181,16 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b = 21"
}
@@ -2025,17 +2225,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.a = 'c' and v.b < 'Hermes'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 9",
"filesort": {
"sort_key": "t1_char.b",
@@ -2045,7 +2249,9 @@ EXPLAIN
"table": {
"table_name": "t1_char",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_char.a = 'c' and t1_char.b < 'Hermes'"
}
@@ -2062,7 +2268,9 @@ EXPLAIN
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 12,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -2100,7 +2308,7 @@ explain select * from v_decimal as v,t2_decimal as t where
(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 6 test.t.b,test.t.b 2
+1 PRIMARY <derived2> ref key0 key0 6 test.t.b,test.t.b 1
2 DERIVED t1_decimal ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_decimal as v,t2_decimal as t where
(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1));
@@ -2108,12 +2316,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t.b > 1 or t.b = 1) and t.b is not null and t.b is not null"
}
@@ -2127,11 +2338,14 @@ EXPLAIN
"key_length": "6",
"used_key_parts": ["a", "b"],
"ref": ["test.t.b", "test.t.b"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "avg_c > 12",
"filesort": {
"sort_key": "t1_decimal.a, t1_decimal.b",
@@ -2141,7 +2355,9 @@ EXPLAIN
"table": {
"table_name": "t1_decimal",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_decimal.b = t1_decimal.a and (t1_decimal.a > 1 or t1_decimal.a = 1)"
}
@@ -2187,12 +2403,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 4 or t2.c > 150"
}
@@ -2202,7 +2421,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -2212,6 +2433,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a < 4 or max_c > 150)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2221,7 +2443,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2256,12 +2480,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 5 and t2.c > 250 and t2.a is not null and t2.c is not null"
}
@@ -2275,11 +2502,14 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "max_c"],
"ref": ["test.t2.a", "test.t2.c"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 250",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2289,7 +2519,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -2336,12 +2568,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 8"
}
@@ -2351,7 +2586,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 8 and v1.max_c = 404"
},
@@ -2361,6 +2598,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c = 404",
"filesort": {
"sort_key": "t1.b",
@@ -2370,7 +2608,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8"
}
@@ -2406,12 +2646,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.d is not null"
}
@@ -2425,12 +2668,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["max_c"],
"ref": ["test.t2.d"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 3 and v1.max_c > 200 and t2.b < v1.b and t2.d = v1.max_c",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2440,7 +2686,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 3"
}
@@ -2468,7 +2716,7 @@ explain select * from v_double as v,t2_double as t where
(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 18 test.t.c,test.t.c 2 Using where
+1 PRIMARY <derived2> ref key0 key0 18 test.t.c,test.t.c 1 Using where
2 DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_double as v,t2_double as t where
(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1));
@@ -2476,12 +2724,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.c is not null and t.c is not null"
}
@@ -2495,12 +2746,15 @@ EXPLAIN
"key_length": "18",
"used_key_parts": ["b", "c"],
"ref": ["test.t.c", "test.t.c"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.c > 10 or v.a = 1",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "avg_a < 22.333 and (t1_double.b > 10 or t1_double.a = 1)",
"filesort": {
"sort_key": "t1_double.b, t1_double.c",
@@ -2510,7 +2764,9 @@ EXPLAIN
"table": {
"table_name": "t1_double",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_double.c = t1_double.b and t1_double.b > 12.2"
}
@@ -2543,7 +2799,7 @@ explain select * from v_double as v,t2_double as t where
(((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 9 test.t.c 2 Using where
+1 PRIMARY <derived2> ref key0 key0 9 test.t.c 1 Using where
2 DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort
explain format=json select * from v_double as v,t2_double as t where
(((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18);
@@ -2551,12 +2807,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.c > 18 and t.c is not null"
}
@@ -2570,12 +2829,15 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["c"],
"ref": ["test.t.c"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.a > 0.2 or v.b < 17 or t.c > 17",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "avg_a < 22.333 and (t1_double.a > 0.2 or t1_double.b < 17 or t1_double.c > 17)",
"filesort": {
"sort_key": "t1_double.b, t1_double.c",
@@ -2585,7 +2847,9 @@ EXPLAIN
"table": {
"table_name": "t1_double",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_double.b > 12.2 and t1_double.c > 18"
}
@@ -2657,17 +2921,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(v.a > 4 or v.a = 2 or v.b > 3) and v.avg_c = 13",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "avg_c > 12 and avg_c = 13",
"filesort": {
"sort_key": "t1_decimal.a, t1_decimal.b",
@@ -2677,7 +2945,9 @@ EXPLAIN
"table": {
"table_name": "t1_decimal",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_decimal.a > 4 or t1_decimal.a = 2 or t1_decimal.b > 3"
}
@@ -2694,7 +2964,9 @@ EXPLAIN
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 9,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -2731,12 +3003,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null and t2.a is not null"
}
@@ -2750,12 +3025,15 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 300",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2765,7 +3043,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t1.a and t1.a > 5"
}
@@ -2807,12 +3087,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 2 and t2.c > 900"
}
@@ -2822,7 +3105,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -2831,6 +3116,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2840,7 +3126,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2874,12 +3162,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null and t2.b is not null"
}
@@ -2893,11 +3184,14 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.b"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2907,7 +3201,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2955,12 +3251,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -2969,7 +3268,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -2979,6 +3280,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -2988,7 +3290,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3060,12 +3364,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -3074,7 +3381,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -3084,6 +3393,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3093,7 +3403,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3137,12 +3449,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 2 and t2.c > 900"
}
@@ -3152,7 +3467,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -3162,6 +3479,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3171,7 +3489,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3214,12 +3534,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null and t2.a is not null"
}
@@ -3233,11 +3556,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3247,7 +3573,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3267,12 +3595,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 18,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.b < 50",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3282,7 +3613,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b < 50"
}
@@ -3345,12 +3678,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b < 50"
}
@@ -3360,7 +3696,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -3370,6 +3708,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3379,7 +3718,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3399,12 +3740,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["v1.b"],
+ "loops": 180,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.a = v1.a or v1.a = t2.a",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3414,7 +3758,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -3457,12 +3803,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -3471,7 +3820,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -3481,6 +3832,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3490,7 +3842,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3506,7 +3860,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 180,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "incremental",
@@ -3516,6 +3872,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3525,7 +3882,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -3572,12 +3931,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -3586,7 +3948,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c < 300"
},
@@ -3596,6 +3960,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3605,7 +3970,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3621,7 +3988,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 180,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.b < 50 or v2.b = 19"
},
@@ -3632,6 +4001,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3641,7 +4011,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and (t1.b < 50 or t1.b = 19)"
}
@@ -3679,12 +4051,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null and t2.a is not null and t2.a is not null"
}
@@ -3698,11 +4073,14 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["a", "b"],
"ref": ["test.t2.a", "test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3712,7 +4090,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t1.a"
}
@@ -3733,12 +4113,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 18,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.max_c < 300",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3748,7 +4131,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -3783,12 +4168,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -3797,7 +4185,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1 and v1.b > 10"
},
@@ -3808,6 +4198,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.b",
@@ -3817,7 +4208,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b > 10"
}
@@ -3838,11 +4231,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["v1.b"],
+ "loops": 180,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -3852,7 +4248,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b > 10"
}
@@ -3898,17 +4296,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.a = 'b' and (v.b = 'Vika' or v.b = 'Ali')",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 9",
"filesort": {
"sort_key": "t1_char.b",
@@ -3918,7 +4320,9 @@ EXPLAIN
"table": {
"table_name": "t1_char",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_char.a = 'b' and (t1_char.b = 'Vika' or t1_char.b = 'Ali')"
}
@@ -3935,7 +4339,9 @@ EXPLAIN
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 12,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a = 'b'"
},
@@ -4005,12 +4411,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -4024,12 +4433,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v3.b < 50 or v3.b = 33",
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "min_c > 109",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4039,7 +4451,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and (t1.b < 50 or t1.b = 33)"
}
@@ -4056,7 +4470,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 18,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c < 500"
},
@@ -4066,6 +4482,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c < 500",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4075,7 +4492,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -4091,7 +4510,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 360,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.max_c > 300"
},
@@ -4102,6 +4523,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4111,7 +4533,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -4164,12 +4588,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b is not null"
}
@@ -4183,12 +4610,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 130 and v1.a is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4198,7 +4628,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -4219,12 +4651,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["v1.a"],
+ "loops": 18,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.min_c < 130",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "min_c < 707 and min_c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4234,7 +4669,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -4320,12 +4757,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -4334,7 +4774,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.avg_c < 400 or v1.a > 1"
},
@@ -4345,6 +4787,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (avg_c < 400 or t1.a > 1)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4354,7 +4797,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -4375,12 +4820,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["v1.a"],
+ "loops": 180,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.min_c < 200",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "min_c < 707 and min_c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4390,7 +4838,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -4411,12 +4861,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["v1.b"],
+ "loops": 360,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v3.avg_c > 170 or v3.a < 5",
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "avg_c > 170 or t1.a < 5",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4426,7 +4879,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 8"
}
@@ -4487,12 +4942,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -4501,7 +4959,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(v1.a = 1 or v1.max_c < 300) and v1.b > 25"
},
@@ -4512,6 +4972,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a = 1 or max_c < 300)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4521,7 +4982,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 25"
}
@@ -4568,12 +5031,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -4587,12 +5053,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 300 and v1.b < 30",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4602,7 +5071,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b < 30"
}
@@ -4656,12 +5127,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c > 800 and t2.b is not null"
}
@@ -4675,12 +5149,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 5",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4690,7 +5167,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 5"
}
@@ -4709,12 +5188,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.d > 800"
}
@@ -4724,7 +5206,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 100 and v1.a > 7"
},
@@ -4734,6 +5218,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 100",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4743,7 +5228,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 7"
}
@@ -4820,12 +5307,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b = 19"
}
@@ -4835,7 +5325,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b = 19 and v1.a < 5"
},
@@ -4845,6 +5337,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a",
@@ -4854,7 +5347,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 19 and t1.a < 5"
}
@@ -4873,12 +5368,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -4887,7 +5385,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 400 or v1.avg_c > 270"
},
@@ -4898,6 +5398,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (max_c > 400 or avg_c > 270)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -4907,7 +5408,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -4995,12 +5498,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -5009,7 +5515,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1 or v1.a = 6"
},
@@ -5020,6 +5528,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5029,7 +5538,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 or t1.a = 6"
}
@@ -5048,12 +5559,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -5062,7 +5576,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 3 and v1.b > 27 or v1.max_c > 550"
},
@@ -5073,6 +5589,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (t1.a > 3 and t1.b > 27 or max_c > 550)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5082,7 +5599,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -5171,12 +5690,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 1"
}
@@ -5186,7 +5708,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1 and (v1.max_c < 500 or v1.avg_c > 500)"
},
@@ -5197,6 +5721,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and (max_c < 500 or avg_c > 500)",
"filesort": {
"sort_key": "t1.b",
@@ -5206,7 +5731,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -5225,12 +5752,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 2"
}
@@ -5240,7 +5770,9 @@ EXPLAIN
"table": {
"table_name": "<derived5>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.b > 10"
},
@@ -5251,6 +5783,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 5,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5260,7 +5793,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b > 10"
}
@@ -5279,12 +5814,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c is not null"
}
@@ -5298,12 +5836,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["max_c"],
"ref": ["test.t2.c"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.b < 10",
"materialized": {
"query_block": {
"select_id": 6,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5313,7 +5854,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b < 10"
}
@@ -5368,12 +5911,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -5382,7 +5928,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 40,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v_union.a < 3 and v_union.c > 100"
},
@@ -5398,6 +5946,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109 and c > 100",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5407,7 +5956,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and t1.a < 3"
}
@@ -5421,6 +5972,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 100",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5430,7 +5982,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 3"
}
@@ -5490,12 +6044,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -5504,7 +6061,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 40,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(v_union.a < 2 or v_union.c > 800) and v_union.b > 12"
},
@@ -5521,6 +6080,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109 and (t1.a < 2 or c > 800)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5530,7 +6090,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and t1.b > 12"
}
@@ -5544,6 +6106,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (t1.a < 2 or c > 800)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5553,7 +6116,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.b > 12"
}
@@ -5590,7 +6155,7 @@ explain select * from v_union,t2 where
(v_union.a=1) and (v_union.a=t2.a) and (v_union.c<200);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 40 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL distinct_key NULL NULL NULL 40 Using where; Using join buffer (flat, BNL join)
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -5600,12 +6165,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 1"
}
@@ -5615,7 +6183,10 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "possible_keys": ["distinct_key"],
+ "loops": 9,
"rows": 40,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v_union.a = 1 and v_union.c < 200"
},
@@ -5631,6 +6202,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109 and c < 200",
"filesort": {
"sort_key": "t1.b",
@@ -5640,7 +6212,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -5654,6 +6228,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c < 200",
"filesort": {
"sort_key": "t1.b",
@@ -5663,7 +6238,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b > 10"
}
@@ -5700,7 +6277,7 @@ explain select * from v_char as v,t2_char as t where
(v.a=t.a) and (v.b='Vika') and (v.max_c>2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 12 Using where
-1 PRIMARY <derived2> ref key0 key0 2 test.t.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 2 test.t.a 1 Using where
2 DERIVED t1_char ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort
explain format=json select * from v_char as v,t2_char as t where
(v.a=t.a) and (v.b='Vika') and (v.max_c>2);
@@ -5708,12 +6285,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a is not null"
}
@@ -5727,12 +6307,15 @@ EXPLAIN
"key_length": "2",
"used_key_parts": ["a"],
"ref": ["test.t.a"],
- "rows": 2,
+ "loops": 12,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.b = 'Vika' and v.max_c > 2",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 9 and max_c > 2",
"filesort": {
"sort_key": "t1_char.a",
@@ -5742,7 +6325,9 @@ EXPLAIN
"table": {
"table_name": "t1_char",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1_char.b = 'Vika'"
}
@@ -5779,7 +6364,7 @@ and ((v_union.c>800) or (v1.max_c>200));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
1 PRIMARY <derived4> ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 40 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY <derived2> ALL distinct_key NULL NULL NULL 40 Using where; Using join buffer (incremental, BNL join)
4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
@@ -5791,12 +6376,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 1"
}
@@ -5806,7 +6394,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1"
},
@@ -5816,6 +6406,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.b",
@@ -5825,7 +6416,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -5842,7 +6435,10 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "possible_keys": ["distinct_key"],
+ "loops": 180,
"rows": 40,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v_union.a = 1"
},
@@ -5859,6 +6455,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109",
"filesort": {
"sort_key": "t1.b",
@@ -5868,7 +6465,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -5882,6 +6481,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.b",
@@ -5891,7 +6491,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b > 10"
}
@@ -5936,7 +6538,7 @@ explain select * from v2_union as v,t2 where
((v.a=6) or (v.a=8)) and (v.c>200) and (v.a=t2.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 6 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 6 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
4 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
@@ -5947,12 +6549,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t2.a = 6 or t2.a = 8) and t2.a is not null"
}
@@ -5961,12 +6566,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.c > 200",
"materialized": {
@@ -5978,6 +6585,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -5987,7 +6595,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and (t1.a = 6 or t1.a = 8)"
}
@@ -6001,6 +6611,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6010,7 +6621,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.a = 6 or t1.a = 8)"
}
@@ -6024,6 +6637,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 707 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6033,7 +6647,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c > 300 and (t1.a = 6 or t1.a = 8)"
}
@@ -6125,7 +6741,7 @@ a b c a b c d
explain select * from v3_union as v,t2 where (v.a=t2.a) and (v.c>6);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 4 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 4 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -6134,12 +6750,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -6148,12 +6767,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.c > 6",
"materialized": {
@@ -6165,12 +6786,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and t1.a + 1 > 6"
}
@@ -6182,12 +6806,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.c > 100 and t1.c > 6"
}
@@ -6257,7 +6884,7 @@ a b c a b c d
explain select * from v3_union as v,t2 where (v.a=t2.a) and ((t2.a>1) or (v.b<20));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 4 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 4 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -6266,12 +6893,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -6280,12 +6910,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 1 or v.b < 20",
"materialized": {
@@ -6297,12 +6929,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and (t1.a > 1 or t1.b < 20)"
}
@@ -6314,12 +6949,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.c > 100 and (t1.a > 1 or t1.b < 20)"
}
@@ -6356,7 +6994,7 @@ explain select * from v3_union as v,t2 where
(v.a=t2.a) and ((v.b=19) or (v.b=21)) and ((v.c<3) or (v.c>600));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 4 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 4 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -6366,12 +7004,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -6380,12 +7021,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(v.b = 19 or v.b = 21) and (v.c < 3 or v.c > 600)",
"materialized": {
@@ -6397,12 +7040,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and (t1.b = 19 or t1.b = 21) and (t1.a + 1 < 3 or t1.a + 1 > 600)"
}
@@ -6414,12 +7060,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.c > 100 and (t1.b = 19 or t1.b = 21) and (t1.c < 3 or t1.c > 600)"
}
@@ -6454,7 +7103,7 @@ a b c a b c d
explain select * from v4_union as v,t2 where (v.a=t2.a) and (v.b<20);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 4 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 4 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -6463,12 +7112,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -6477,12 +7129,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v.b < 20",
"materialized": {
@@ -6494,6 +7148,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6503,7 +7158,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and t1.b < 20"
}
@@ -6517,12 +7174,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.b < 20"
}
@@ -6575,7 +7235,7 @@ explain select * from v4_union as v,t2 where
(v.a=t2.a) and ((t2.a<3) or (v.b<40)) and (v.c>500);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 4 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 4 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
3 UNION t1 ALL NULL NULL NULL NULL 20 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -6585,12 +7245,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -6599,12 +7262,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t2.a < 3 or v.b < 40) and v.c > 500",
"materialized": {
@@ -6616,6 +7281,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 109 and c > 500",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6625,7 +7291,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 10 and (t1.a < 3 or t1.b < 40)"
}
@@ -6639,12 +7307,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.a < 3 or t1.b < 40) and t1.c + 100 > 500"
}
@@ -6709,17 +7380,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.a < 13",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a, v1.b",
"temporary_table": {
@@ -6728,12 +7403,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15 and v1.a < 13",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6743,7 +7421,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15 and t1.a < 13"
}
@@ -6767,7 +7447,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 20,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 5 and v1.b > 12"
},
@@ -6777,6 +7459,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6786,7 +7469,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b > 12"
}
@@ -6829,12 +7514,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null and t2.a is not null"
}
@@ -6848,11 +7536,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a, v1.b",
"temporary_table": {
@@ -6861,12 +7552,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6876,7 +7570,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15"
}
@@ -6904,12 +7600,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 18,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b > 30",
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -6919,7 +7618,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 30"
}
@@ -6964,12 +7665,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 1 and t2.a is not null and t2.a is not null"
}
@@ -6983,12 +7687,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.min_c > 100",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "min_c > 100",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -6998,12 +7705,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15 and v1.a > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7013,7 +7723,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15 and t1.a > 1"
}
@@ -7041,12 +7753,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 18,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b < 30",
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7056,7 +7771,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.b < 30"
}
@@ -7186,12 +7903,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -7200,7 +7920,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.b > 10 and v4.a > 1 or v4.b < 20"
},
@@ -7211,6 +7933,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a, v1.b",
"temporary_table": {
@@ -7219,12 +7942,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15 and (v1.b > 10 and v1.a > 1 or v1.b < 20)",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7234,7 +7960,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15 and (t1.b > 10 and t1.a > 1 or t1.b < 20)"
}
@@ -7262,12 +7990,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["v4.a"],
+ "loops": 180,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.max_c > 200",
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7277,7 +8008,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -7315,17 +8048,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.a > 12 and v4.min_c < 300 and v4.b > 13 or v4.a < 1",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "v1.a > 12 and min_c < 300 and v1.b > 13 or v1.a < 1",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -7335,12 +8072,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15 and (v1.a > 12 and v1.b > 13 or v1.a < 1)",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7350,7 +8090,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15 and (t1.a > 12 and t1.b > 13 or t1.a < 1)"
}
@@ -7374,7 +8116,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 20,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -7383,6 +8127,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7392,7 +8137,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -7432,17 +8179,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.b = v4.a and v4.min_c < 100 and v4.a is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "min_c < 100",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -7452,12 +8203,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b = v1.a and v1.a < 15",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7467,7 +8221,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t1.a and t1.a < 15"
}
@@ -7495,11 +8251,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["v4.a"],
+ "loops": 20,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7509,7 +8268,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -7549,17 +8310,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.b = v4.a and v4.a < 30 and v4.a is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a, v1.b",
"temporary_table": {
@@ -7568,12 +8333,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b = v1.a and v1.a < 15 and v1.a < 30",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7583,7 +8351,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t1.a and t1.a < 15 and t1.a < 30"
}
@@ -7611,11 +8381,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["v4.a"],
+ "loops": 20,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7625,7 +8398,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and t1.b < 30"
}
@@ -7665,17 +8440,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.b = v4.a and (v4.a < 30 or v4.a > 2) and v4.a is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a, v1.b",
"temporary_table": {
@@ -7684,12 +8463,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b = v1.a and v1.a < 15 and (v1.a < 30 or v1.a > 2)",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7699,7 +8481,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t1.a and t1.a < 15 and (t1.a < 30 or t1.a > 2)"
}
@@ -7727,11 +8511,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["v4.a"],
+ "loops": 20,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7741,7 +8528,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5 and (t1.b < 30 or t1.b > 2)"
}
@@ -7789,17 +8578,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(v4.a < 12 and v4.b > 13 or v4.a > 10) and v4.min_c > 100 and v4.min_c is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "min_c > 100",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -7809,12 +8602,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15 and (v1.a < 12 and v1.b > 13 or v1.a > 10)",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7824,7 +8620,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15 and (t1.a < 12 and t1.b > 13 or t1.a > 10)"
}
@@ -7852,11 +8650,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["max_c"],
"ref": ["v4.min_c"],
+ "loops": 20,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707 and max_c > 100",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7866,7 +8667,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -7913,12 +8716,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c > 100 and t2.c is not null"
}
@@ -7932,12 +8738,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["min_c"],
"ref": ["test.t2.c"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v4.a < 12 and t2.b > 13 or v4.a > 10",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "min_c > 100",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -7947,12 +8756,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 15 and (v1.a < 12 or v1.a > 10)",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -7962,7 +8774,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 15 and (t1.a < 12 or t1.a > 10)"
}
@@ -7986,7 +8800,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 18,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -7995,6 +8811,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 707",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -8004,7 +8821,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 5"
}
@@ -8159,6 +8978,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "0 or <in_optimizer>(2,<exists>(subquery#3))",
"nested_loop": [
{
@@ -8173,8 +8993,10 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
- "filtered": 100,
+ "cost": "COST_REPLACED",
+ "filtered": 50,
"attached_condition": "t2.b = 2",
"first_match": "t1"
}
@@ -8184,6 +9006,7 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -8194,17 +9017,22 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["c"],
"ref": ["func"],
- "rows": 2,
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 5,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.c = 2"
}
@@ -8246,7 +9074,7 @@ SELECT d FROM v4 WHERE s > a
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1)
-3 DEPENDENT SUBQUERY <derived5> index_subquery key0 key0 5 func 2 Using where
+3 DEPENDENT SUBQUERY <derived5> index_subquery key0 key0 5 func 1 Using where
5 DERIVED t4 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
explain format=json SELECT * FROM t1 WHERE a IN (
SELECT b FROM v2 WHERE b < a OR b IN (
@@ -8257,6 +9085,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "0 or <in_optimizer>(2,<exists>(subquery#3))",
"nested_loop": [
{
@@ -8271,8 +9100,10 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
- "filtered": 100,
+ "cost": "COST_REPLACED",
+ "filtered": 50,
"attached_condition": "t2.b = 2",
"first_match": "t1"
}
@@ -8282,6 +9113,7 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -8292,11 +9124,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["func"],
- "rows": 2,
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 5,
+ "cost": "COST_REPLACED",
"having_condition": "s > 2",
"filesort": {
"sort_key": "t4.d",
@@ -8306,7 +9141,9 @@ EXPLAIN
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -8361,23 +9198,29 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.b = 1",
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b = 1"
}
@@ -8416,12 +9259,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -8434,18 +9280,23 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(v2.b is null) and trigcond(trigcond(t1.a is not null))",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -8471,23 +9322,29 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "<nop>(v1.i <= 3)",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "<nop>(t1.i <= 3)"
}
@@ -8532,12 +9389,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "<in_optimizer>(t1.b,<exists>(subquery#2)) or t1.b = 100"
}
@@ -8547,6 +9407,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -8557,17 +9418,22 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk2"],
"ref": ["func"],
- "rows": 2,
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -8594,12 +9460,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "<in_optimizer>(t1.b,<exists>(subquery#3)) or t1.b = 100"
}
@@ -8609,6 +9478,7 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -8619,17 +9489,22 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk2"],
"ref": ["func"],
- "rows": 2,
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -8678,23 +9553,29 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 50",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 50"
}
@@ -8736,17 +9617,21 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.s < 50",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "s < 50",
"filesort": {
"sort_key": "t3.a",
@@ -8756,7 +9641,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -8794,13 +9681,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery2>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -8819,18 +9707,23 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b = 2",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 2"
}
@@ -8864,13 +9757,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery2>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -8886,18 +9780,23 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.f = 2",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.f = 2"
}
@@ -8912,7 +9811,9 @@ EXPLAIN
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 2,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.pk > 2"
},
@@ -8922,6 +9823,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -8931,7 +9833,9 @@ EXPLAIN
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["pk"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t.pk > 2"
}
@@ -8964,13 +9868,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -8986,18 +9891,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = 3",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = 3"
}
@@ -9031,13 +9941,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -9053,18 +9964,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = 2.71",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = 2.7100000381469727"
}
@@ -9093,13 +10009,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -9115,18 +10032,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = 3.21",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = 3.21"
}
@@ -9155,13 +10077,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -9177,18 +10100,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = 'aa'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = 'aa'"
}
@@ -9219,13 +10147,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -9241,18 +10170,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = '2007-05-28 00:00:00'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = TIMESTAMP'2007-05-28 00:00:00'"
}
@@ -9281,13 +10215,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -9303,18 +10238,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = '2007-05-28'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = DATE'2007-05-28'"
}
@@ -9343,13 +10283,14 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<subquery3>",
"access_type": "system",
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"materialized": {
"unique": 1,
"query_block": {
@@ -9365,18 +10306,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "sq.i = '10:00:02'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.i = TIME'10:00:02'"
}
@@ -9405,23 +10351,29 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "nullif(1,v1.i)",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "nullif(1,t1.i)"
}
@@ -9500,23 +10452,29 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c = 'foo'",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = 'foo'"
}
@@ -9531,12 +10489,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "1 = t2.a"
}
@@ -9561,23 +10522,29 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "<cache>(<in_optimizer>(1,<exists>(subquery#2))) or v1.c = 'foo'",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -9591,12 +10558,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 128,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "1 = t2.a"
}
@@ -9737,12 +10707,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 and t1.a is not null"
}
@@ -9756,11 +10729,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 7,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t2.b",
"temporary_table": {
@@ -9769,7 +10745,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b > 2"
}
@@ -9797,12 +10775,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 and t1.a is not null"
}
@@ -9816,11 +10797,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 7,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t2.b",
"temporary_table": {
@@ -9829,7 +10813,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b > 2"
}
@@ -9860,12 +10846,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 and t1.a is not null"
}
@@ -9879,18 +10868,23 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 7,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b > 2"
}
@@ -9917,12 +10911,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 and t1.a is not null"
}
@@ -9936,11 +10933,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["m"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 7,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "m > 2",
"filesort": {
"sort_key": "t2.b",
@@ -9950,7 +10950,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -9980,24 +10982,30 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.f > 0",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "f > 0",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -10028,6 +11036,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -10038,7 +11047,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["i1"],
"ref": ["const"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index": true
}
@@ -10048,7 +11059,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.i2 = 1"
},
@@ -10058,12 +11071,15 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.i2 = 1"
}
@@ -10096,24 +11112,30 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.f = 'a' or t.f = 'b'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -10151,12 +11173,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.id2 is not null"
}
@@ -10170,12 +11195,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["id2"],
"ref": ["test.t1.id2"],
- "rows": 2,
+ "loops": 4,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "vc.ct > 0",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "ct > 0",
"filesort": {
"sort_key": "t2.id2",
@@ -10185,7 +11213,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -10250,12 +11280,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x = 1"
}
@@ -10265,7 +11298,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 2,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1 and v1.b = 1 and v1.max_c > 30"
},
@@ -10275,13 +11310,16 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c > 37 and max_c > 30",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b = 1"
}
@@ -10335,12 +11373,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x = 1"
}
@@ -10350,7 +11391,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 2,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 1 and v1.b = 1 and v1.d = 1 and v1.max_c > 30"
},
@@ -10360,13 +11403,16 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c > 37 and max_c > 30",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b = 1 and t1.d = 1"
}
@@ -10433,7 +11479,7 @@ WHERE d_tab.e>1
;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 10 test.t1.a,test.t1.b 1
3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
EXPLAIN FORMAT=JSON SELECT * FROM t1
WHERE (t1.a,t1.b) IN
@@ -10452,12 +11498,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null"
}
@@ -10465,18 +11514,20 @@ EXPLAIN
{
"table": {
"table_name": "<derived3>",
- "access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
"key_length": "10",
"used_key_parts": ["e", "max_f"],
"ref": ["test.t1.a", "test.t1.b"],
- "rows": 2,
+ "loops": 5,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
- "first_match": "t1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_f > 18",
"filesort": {
"sort_key": "t2.e",
@@ -10486,7 +11537,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e > 1"
}
@@ -10532,7 +11585,7 @@ WHERE d_tab.max_f<25
;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 10 test.t1.a,test.t1.b 1
3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
EXPLAIN FORMAT=JSON SELECT * FROM t1
WHERE (t1.a,t1.b) IN
@@ -10551,12 +11604,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b < 25 and t1.a is not null and t1.b is not null"
}
@@ -10564,18 +11620,20 @@ EXPLAIN
{
"table": {
"table_name": "<derived3>",
- "access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
"key_length": "10",
"used_key_parts": ["e", "max_f"],
"ref": ["test.t1.a", "test.t1.b"],
- "rows": 2,
+ "loops": 5,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
- "first_match": "t1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_f > 18 and max_f < 25",
"filesort": {
"sort_key": "t2.e",
@@ -10585,7 +11643,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -10649,12 +11709,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a is not null and t1.b is not null"
}
@@ -10668,24 +11731,30 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "max_f"],
"ref": ["test.t1.a", "test.t1.b"],
+ "loops": 5,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.e > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t2.e",
"temporary_table": {
@@ -10694,7 +11763,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e > 1"
}
@@ -10765,12 +11836,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a is not null and t1.b is not null"
}
@@ -10784,24 +11858,30 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "max_f"],
"ref": ["test.t1.a", "test.t1.b"],
+ "loops": 5,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.max_f > 20",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_f > 20",
"filesort": {
"sort_key": "t2.e",
@@ -10811,7 +11891,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -10873,17 +11955,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "dt.a = 2",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "a = 2",
"filesort": {
"sort_key": "t1.a",
@@ -10893,7 +11979,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -10930,17 +12018,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "dt.a > 1",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "a > 1",
"filesort": {
"sort_key": "t1.a",
@@ -10950,7 +12042,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 3"
}
@@ -10987,17 +12081,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "dt.a = 'ab'",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -11006,7 +12104,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -11042,17 +12142,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "dt.a = 1",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -11061,7 +12165,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -11117,12 +12223,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "1 in (0,t1.a) and t1.a is not null"
}
@@ -11136,11 +12245,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -11149,7 +12261,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "1 in (0,t1.a) and 1 in (0,t1.a)"
}
@@ -11185,12 +12299,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a in (1,t1.a) and t1.a is not null"
}
@@ -11204,11 +12321,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -11217,7 +12337,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a in (1,t1.a)"
}
@@ -11316,17 +12438,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a <= 2",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -11335,7 +12461,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a <= 2"
}
@@ -11362,12 +12490,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -11381,12 +12512,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["c"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 5,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = t.c and t.a >= 3",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -11395,7 +12529,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a >= 3"
}
@@ -11425,12 +12561,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -11438,18 +12577,23 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 5,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a = 2 and t2.a = t.c + 9",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -11499,17 +12643,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "case when tab2.max_a = 1 or tab2.max_a = 2 then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "case when max_a = 1 or max_a = 2 then 1 else 0 end = 1",
"filesort": {
"sort_key": "t1.b",
@@ -11519,7 +12667,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -11556,17 +12706,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "case when tab2.max_a = 1 or tab2.max_a > 2 and tab2.max_a < 4 then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "case when max_a = 1 or max_a > 2 and max_a < 4 then 1 else 0 end = 1",
"filesort": {
"sort_key": "t1.b",
@@ -11576,7 +12730,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -11613,17 +12769,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "case when tab2.max_a > 1 and (tab2.max_a = 2 or tab2.max_a > 2) then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "case when max_a > 1 and (max_a = 2 or max_a > 2) then 1 else 0 end = 1",
"filesort": {
"sort_key": "t1.b",
@@ -11633,7 +12793,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -11670,17 +12832,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "case when tab2.b = 2 or tab2.b = 4 then 1 else 0 end = 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
@@ -11689,7 +12855,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "case when t1.b = 2 or t1.b = 4 then 1 else 0 end = 1"
}
@@ -11747,33 +12915,42 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 144,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.f is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -11787,7 +12964,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 12,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f is not null"
},
@@ -11830,23 +13009,29 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
- "rows": 16,
+ "loops": 1,
+ "rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.f is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f is not null"
}
@@ -11860,17 +13045,22 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["f"],
"ref": ["test.t1.f"],
- "rows": 2,
+ "loops": 8,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f is not null"
}
@@ -11915,12 +13105,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f2 < 2 and t1.f2 is not null"
}
@@ -11934,17 +13127,22 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f2"],
"ref": ["test.t1.f2"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f2 < 2"
}
@@ -11970,12 +13168,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f2 < 2 and t1.f2 is not null"
}
@@ -11989,18 +13190,23 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f2"],
"ref": ["test.t1.f2"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f2 < 2"
}
@@ -12050,12 +13256,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -12256,12 +13465,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v3.col1 = 123 and v3.col2 = 321",
"materialized": {
@@ -12271,6 +13483,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -12281,7 +13494,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["const"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -12292,6 +13507,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -12302,7 +13518,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["const"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -12340,17 +13558,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.s + 1 > 10 and v2.a > 1 and v2.a2 > 123",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "s + 1 > 10 and a2 > 123",
"filesort": {
"sort_key": "t1.a, f1(t1.a)",
@@ -12360,7 +13582,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -12393,12 +13617,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t4.a + 1 > 10 and t4.b > 1 and t4.c > 123 and t4.a is not null and t4.b is not null and t4.c is not null"
}
@@ -12412,13 +13639,16 @@ EXPLAIN
"key_length": "23",
"used_key_parts": ["a", "f1(a)", "sum(b)"],
"ref": ["test.t4.a", "test.t4.b", "test.t4.c"],
+ "loops": 3,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t4.c = `<subquery2>`.`sum(b)`",
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`f1(a)` > 1 and `sum(b)` > 123",
"temporary_table": {
"nested_loop": [
@@ -12426,7 +13656,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a + 1 > 10"
}
@@ -12481,7 +13713,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a<5);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -12490,12 +13722,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 5 and t2.a is not null"
}
@@ -12504,12 +13739,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -12520,6 +13757,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -12529,7 +13767,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a < 5"
}
@@ -12543,6 +13783,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -12552,7 +13793,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 5"
}
@@ -12583,7 +13826,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a=8);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL distinct_key NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -12592,12 +13835,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 8"
}
@@ -12607,7 +13853,10 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "possible_keys": ["distinct_key"],
+ "loops": 9,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 8"
},
@@ -12623,6 +13872,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.b",
@@ -12632,7 +13882,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8"
}
@@ -12646,6 +13898,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100",
"filesort": {
"sort_key": "t1.b",
@@ -12655,7 +13908,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8 and t1.b > 10"
}
@@ -12685,7 +13940,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (t2.a=8);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL distinct_key NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -12694,12 +13949,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 8"
}
@@ -12709,7 +13967,10 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "possible_keys": ["distinct_key"],
+ "loops": 9,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 8"
},
@@ -12725,6 +13986,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.b",
@@ -12734,7 +13996,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8"
}
@@ -12748,6 +14012,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100",
"filesort": {
"sort_key": "t1.b",
@@ -12757,7 +14022,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8 and t1.b > 10"
}
@@ -12789,7 +14056,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.c>200);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -12798,12 +14065,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -12812,12 +14082,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 200",
"materialized": {
@@ -12829,6 +14101,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -12838,7 +14111,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9"
}
@@ -12852,6 +14127,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -12861,7 +14137,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10"
}
@@ -12894,7 +14172,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a<5) and (v1.c>110);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -12903,12 +14181,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 5 and t2.a is not null"
}
@@ -12917,12 +14198,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 110",
"materialized": {
@@ -12934,6 +14217,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 110",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -12943,7 +14227,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a < 5"
}
@@ -12957,6 +14243,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100 and c > 110",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -12966,7 +14253,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 5"
}
@@ -13000,7 +14289,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and ((v1.b>27) or (v1.b<19));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -13009,12 +14298,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -13023,12 +14315,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b > 27 or v1.b < 19",
"materialized": {
@@ -13040,6 +14334,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13049,7 +14344,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and (t1.b > 27 or t1.b < 19)"
}
@@ -13063,6 +14360,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13072,7 +14370,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.b > 27 or t1.b < 19)"
}
@@ -13111,7 +14411,7 @@ explain select * from v1,t2 where
(v1.a=t2.a) and ((v1.c>200) or (v1.c<105));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -13121,12 +14421,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -13135,12 +14438,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 200 or v1.c < 105",
"materialized": {
@@ -13152,6 +14457,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (c > 200 or c < 105)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13161,7 +14467,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9"
}
@@ -13175,6 +14483,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100 and (c > 200 or c < 105)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13184,7 +14493,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10"
}
@@ -13255,12 +14566,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -13269,7 +14583,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 3 or v1.a = 1 and v1.c < 110"
},
@@ -13286,6 +14602,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (t1.a > 3 and c > 110 or t1.a = 1 and c < 110)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13295,7 +14612,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and (t1.a > 3 or t1.a = 1)"
}
@@ -13309,6 +14628,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100 and (t1.a > 3 and c > 110 or t1.a = 1 and c < 110)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13318,7 +14638,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.a > 3 or t1.a = 1)"
}
@@ -13399,7 +14721,7 @@ where
((d1.a<4) and (d1.c<200)));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.b 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.b 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -13418,12 +14740,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b is not null"
}
@@ -13432,12 +14757,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c = 909 and t2.b > 13 or d1.a < 4 and d1.c < 200",
"materialized": {
@@ -13449,6 +14776,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (t1.b > 13 or t1.a < 4 and c < 200)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13458,7 +14786,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and (t1.b > 13 or t1.a < 4)"
}
@@ -13472,6 +14802,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 100 and (t1.b > 13 or t1.a < 4 and c < 200)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13481,7 +14812,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.b > 13 or t1.a < 4)"
}
@@ -13524,7 +14857,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a<5);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -13533,12 +14866,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 5 and t2.a is not null"
}
@@ -13547,12 +14883,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -13563,6 +14901,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13572,7 +14911,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a < 5"
}
@@ -13586,6 +14927,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13595,7 +14937,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 5"
}
@@ -13628,7 +14972,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a=6);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL distinct_key NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -13637,12 +14981,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 6"
}
@@ -13652,7 +14999,10 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "possible_keys": ["distinct_key"],
+ "loops": 9,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 6"
},
@@ -13668,6 +15018,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200",
"filesort": {
"sort_key": "t1.b",
@@ -13677,7 +15028,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 6"
}
@@ -13691,6 +15044,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.b",
@@ -13700,7 +15054,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 6 and t1.b > 10"
}
@@ -13732,7 +15088,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (t2.a=6);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL distinct_key NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -13741,12 +15097,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a = 6"
}
@@ -13756,7 +15115,10 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "possible_keys": ["distinct_key"],
+ "loops": 9,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a = 6"
},
@@ -13772,6 +15134,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200",
"filesort": {
"sort_key": "t1.b",
@@ -13781,7 +15144,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 6"
}
@@ -13795,6 +15160,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.b",
@@ -13804,7 +15170,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 6 and t1.b > 10"
}
@@ -13840,7 +15208,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.c>500);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -13849,12 +15217,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -13863,12 +15234,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 500",
"materialized": {
@@ -13880,6 +15253,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and c > 500",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13889,7 +15263,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9"
}
@@ -13903,6 +15279,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 500",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13912,7 +15289,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10"
}
@@ -13945,7 +15324,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a<5) and (v1.c>500);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -13954,12 +15333,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 5 and t2.a is not null"
}
@@ -13968,12 +15350,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 500",
"materialized": {
@@ -13985,6 +15369,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and c > 500",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -13994,7 +15379,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a < 5"
}
@@ -14008,6 +15395,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 500",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14017,7 +15405,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 5"
}
@@ -14053,7 +15443,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and ((v1.b>27) or (v1.b<19));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -14062,12 +15452,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -14076,12 +15469,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b > 27 or v1.b < 19",
"materialized": {
@@ -14093,6 +15488,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14102,7 +15498,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and (t1.b > 27 or t1.b < 19)"
}
@@ -14116,6 +15514,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14125,7 +15524,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.b > 27 or t1.b < 19)"
}
@@ -14164,7 +15565,7 @@ explain select * from v1,t2 where
(v1.a=t2.a) and ((v1.c<400) or (v1.c>800));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -14174,12 +15575,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a is not null"
}
@@ -14188,12 +15592,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c < 400 or v1.c > 800",
"materialized": {
@@ -14205,6 +15611,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and (c < 400 or c > 800)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14214,7 +15621,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9"
}
@@ -14228,6 +15637,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (c < 400 or c > 800)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14237,7 +15647,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10"
}
@@ -14306,12 +15718,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -14320,7 +15735,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 9,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a > 1 or v1.a = 1 and v1.c > 500"
},
@@ -14337,6 +15754,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and (t1.a > 1 and c < 500 or t1.a = 1 and c > 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14346,7 +15764,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and (t1.a > 1 or t1.a = 1)"
}
@@ -14360,6 +15780,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (t1.a > 1 and c < 500 or t1.a = 1 and c > 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14369,7 +15790,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.a > 1 or t1.a = 1)"
}
@@ -14446,7 +15869,7 @@ where
((d1.a>4) and (d1.c>500)));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.b 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.b 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -14465,12 +15888,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b is not null"
}
@@ -14479,12 +15905,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c = 988 and t2.b > 13 or d1.a > 4 and d1.c > 500",
"materialized": {
@@ -14496,6 +15924,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and (t1.b > 13 or t1.a > 4 and c > 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14505,7 +15934,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and (t1.b > 13 or t1.a > 4)"
}
@@ -14519,6 +15950,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and (t1.b > 13 or t1.a > 4 and c > 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14528,7 +15960,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and (t1.b > 13 or t1.a > 4)"
}
@@ -14570,7 +16004,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>5) and (v1.c>200);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 3 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 3 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 UNION <derived4> ALL NULL NULL NULL NULL 18 Using where
4 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -14582,12 +16016,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 5 and t2.a is not null"
}
@@ -14596,12 +16033,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 200",
"materialized": {
@@ -14613,6 +16052,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14622,7 +16062,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 5"
}
@@ -14636,12 +16078,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "__5.a > 5 and __5.c > 200",
"materialized": {
@@ -14653,6 +16098,7 @@ EXPLAIN
{
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14662,7 +16108,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 5"
}
@@ -14676,6 +16124,7 @@ EXPLAIN
"query_block": {
"select_id": 5,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 530 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14685,7 +16134,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 3 and t1.a > 5"
}
@@ -14738,7 +16189,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<200);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 3 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 3 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 UNION t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -14748,12 +16199,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -14762,12 +16216,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c < 200",
"materialized": {
@@ -14779,6 +16235,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14788,7 +16245,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 4"
}
@@ -14802,6 +16261,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 500 and c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14811,7 +16271,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 3 and t1.a > 4"
}
@@ -14825,6 +16287,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14834,7 +16297,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 4"
}
@@ -14876,7 +16341,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>5) and (v1.c>200);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 3 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 3 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 UNION t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -14886,12 +16351,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 5 and t2.a is not null"
}
@@ -14900,12 +16368,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 200",
"materialized": {
@@ -14917,6 +16387,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14926,7 +16397,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 5"
}
@@ -14940,6 +16413,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14949,7 +16423,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 5"
}
@@ -14963,6 +16439,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 530 and c > 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -14972,7 +16449,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 3 and t1.a > 5"
}
@@ -15016,7 +16495,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<200);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 3 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 3 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 UNION t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -15026,12 +16505,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15040,12 +16522,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c < 200",
"materialized": {
@@ -15057,6 +16541,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15066,7 +16551,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 4"
}
@@ -15080,6 +16567,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 500 and c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15089,7 +16577,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 3 and t1.a > 4"
}
@@ -15103,6 +16593,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c < 200",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15112,7 +16603,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 4"
}
@@ -15154,7 +16647,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<150);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -15164,12 +16657,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15178,12 +16674,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c < 150",
"materialized": {
@@ -15195,6 +16693,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c < 150",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15204,7 +16703,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 4"
}
@@ -15218,6 +16719,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 500 and c < 150",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15227,7 +16729,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 7 and t1.a > 4"
}
@@ -15241,6 +16745,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 150 and c < 150",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15250,7 +16755,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 4"
}
@@ -15290,7 +16797,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<130);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT <derived4> ALL NULL NULL NULL NULL 18 Using where
4 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -15302,12 +16809,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15316,12 +16826,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c < 130",
"materialized": {
@@ -15333,6 +16845,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15342,7 +16855,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 4"
}
@@ -15356,12 +16871,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "__5.a > 4 and __5.c < 130",
"materialized": {
@@ -15373,6 +16891,7 @@ EXPLAIN
{
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "c > 150 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15382,7 +16901,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 4"
}
@@ -15396,6 +16917,7 @@ EXPLAIN
"query_block": {
"select_id": 5,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 500 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15405,7 +16927,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 7 and t1.a > 4"
}
@@ -15457,7 +16981,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<130);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 3 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 3 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT <derived4> ALL NULL NULL NULL NULL 18 Using where
4 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -15470,12 +16994,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15484,12 +17011,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c < 130",
"materialized": {
@@ -15501,6 +17030,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15510,7 +17040,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 4"
}
@@ -15524,12 +17056,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived4>",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "__6.a > 4 and __6.c < 130",
"materialized": {
@@ -15541,6 +17076,7 @@ EXPLAIN
{
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"having_condition": "c > 150 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15550,7 +17086,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 4"
}
@@ -15564,6 +17102,7 @@ EXPLAIN
"query_block": {
"select_id": 5,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 500 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15573,7 +17112,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 7 and t1.a > 4"
}
@@ -15596,6 +17137,7 @@ EXPLAIN
"query_block": {
"select_id": 6,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c < 120 and c < 130",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15605,7 +17147,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 7 and t1.a > 4"
}
@@ -15648,7 +17192,7 @@ a b c a b c
explain select * from v2,t2 where (v2.a=t2.a) and (v2.a>4) and (v2.c<150);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 1 Using where
2 DERIVED <derived3> ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -15658,12 +17202,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15677,12 +17224,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.c < 150",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 150",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -15692,7 +17242,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 7 and v1.a > 4",
"materialized": {
@@ -15704,6 +17256,7 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15713,7 +17266,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 7 and t1.a > 4"
}
@@ -15727,6 +17282,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 120",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15736,7 +17292,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a < 7 and t1.a > 4"
}
@@ -15786,7 +17344,7 @@ a b c a b c
explain select * from v2,t2 where (v2.a=t2.a) and (v2.a>4) and (v2.c<150);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 1 Using where
2 DERIVED <derived3> ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -15796,12 +17354,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15815,12 +17376,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.c < 150",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c < 150",
"filesort": {
"sort_key": "v1.a, v1.b",
@@ -15830,7 +17394,9 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 7 and v1.a > 4",
"materialized": {
@@ -15842,6 +17408,7 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "c < 300",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15851,7 +17418,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a < 7 and t1.a > 4"
}
@@ -15865,6 +17434,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 150",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -15874,7 +17444,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a < 7 and t1.a > 4"
}
@@ -15921,7 +17493,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.b>12) and (v1.c<450);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL
@@ -15930,12 +17502,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 4 and t2.a is not null"
}
@@ -15944,12 +17519,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b > 12 and v1.c < 450",
"materialized": {
@@ -15961,6 +17538,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 300 and t1.b > 12 and c < 450",
"filesort": {
"sort_key": "t1.a",
@@ -15970,7 +17548,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 4"
}
@@ -15984,6 +17564,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 200 and t1.a > 4 and c < 450",
"filesort": {
"sort_key": "t1.b",
@@ -15993,7 +17574,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b < 21 and t1.b > 12"
}
@@ -16033,7 +17616,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a<2) and (v1.b<30) and (v1.c>450);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -16042,12 +17625,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a < 2 and t2.a is not null"
}
@@ -16056,12 +17642,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b < 30 and v1.c > 450",
"materialized": {
@@ -16073,6 +17661,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 300 and t1.b < 30 and c > 450",
"filesort": {
"sort_key": "t1.a",
@@ -16082,7 +17671,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 20 and t1.a < 2"
}
@@ -16096,6 +17687,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 150 and t1.a < 2 and c > 450",
"filesort": {
"sort_key": "t1.b",
@@ -16105,7 +17697,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 7 and t1.b < 30"
}
@@ -16147,7 +17741,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and ((v1.a<2) or (v1.a<5)) and (v1.c>450);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 EXCEPT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL
@@ -16156,12 +17750,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t2.a < 2 or t2.a < 5) and t2.a is not null"
}
@@ -16170,12 +17767,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 450",
"materialized": {
@@ -16187,6 +17786,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 300 and c > 450",
"filesort": {
"sort_key": "t1.a",
@@ -16196,7 +17796,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 20 and (t1.a < 2 or t1.a < 5)"
}
@@ -16210,6 +17812,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"having_condition": "c > 150 and (t1.a < 2 or t1.a < 5) and c > 450",
"filesort": {
"sort_key": "t1.b",
@@ -16219,7 +17822,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 7"
}
@@ -16262,7 +17867,7 @@ a b c a b c
explain select * from v1,t2 where (v1.a=t2.a) and (v1.a>1) and (v1.b > 12) and (v1.c>400);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.a 3 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.a 3 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
3 INTERSECT t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
4 UNION t1 ALL NULL NULL NULL NULL 18 Using where; Using temporary; Using filesort
@@ -16272,12 +17877,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a > 1 and t2.a is not null"
}
@@ -16286,12 +17894,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 9,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.b > 12 and v1.c > 400",
"materialized": {
@@ -16303,6 +17913,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "c > 100 and t1.b > 12 and c > 400",
"filesort": {
"sort_key": "t1.a",
@@ -16312,7 +17923,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 9 and t1.a > 1"
}
@@ -16326,6 +17939,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"having_condition": "c < 800 and t1.a > 1 and c > 400",
"filesort": {
"sort_key": "t1.b",
@@ -16335,7 +17949,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 3 and t1.b > 12"
}
@@ -16349,6 +17965,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "c > 300 and c > 400",
"filesort": {
"sort_key": "t1.a, t1.b",
@@ -16358,7 +17975,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 10 and t1.a > 1 and t1.b > 12"
}
@@ -16409,19 +18028,22 @@ a b max_c a b c
explain select * from v1,t2 where (v1.b=t2.b) and (v1.a<5);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.b 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.b 1 Using where
2 DERIVED t3 range i1 i1 5 NULL 5 Using index condition
explain format=json select * from v1,t2 where (v1.b=t2.b) and (v1.a<5);
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b is not null"
}
@@ -16435,12 +18057,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 5",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -16450,7 +18075,9 @@ EXPLAIN
"key": "i1",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.a > 0 and t3.a < 5"
}
@@ -16486,7 +18113,7 @@ a b c a b c
explain select * from v1,t2 where (v1.b=t2.b) and (v1.a<4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.b 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.b 1 Using where
2 DERIVED t3 range i1 i1 5 NULL 2 Using index condition
3 UNION t3 range i1 i1 5 NULL 1 Using index condition
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -16495,12 +18122,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b is not null"
}
@@ -16509,12 +18139,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
- "rows": 2,
+ "loops": 9,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 4",
"materialized": {
@@ -16526,6 +18158,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -16535,7 +18168,9 @@ EXPLAIN
"key": "i1",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.a > 1 and t3.a < 4"
}
@@ -16547,6 +18182,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -16556,7 +18192,9 @@ EXPLAIN
"key": "i1",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.a > 2 and t3.a < 4"
}
@@ -16596,7 +18234,7 @@ a b c a b c
explain select * from v1,t2 where (v1.b=t2.b) and (v1.a<3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.b 2 Using where
+1 PRIMARY <derived2> ref key1,distinct_key key1 5 test.t2.b 2 Using where
2 DERIVED t3 range i1 i1 5 NULL 1 Using index condition
3 UNION t3 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
@@ -16605,12 +18243,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.b is not null"
}
@@ -16619,12 +18260,14 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
+ "possible_keys": ["key1", "distinct_key"],
+ "key": "key1",
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t2.b"],
+ "loops": 9,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.a < 3",
"materialized": {
@@ -16636,6 +18279,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -16645,7 +18289,9 @@ EXPLAIN
"key": "i1",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.a > 1 and t3.a < 3"
}
@@ -16657,6 +18303,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "t3.a < 3",
"filesort": {
"sort_key": "t3.b",
@@ -16666,7 +18313,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.b < 21"
}
@@ -16738,17 +18387,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a > 2 and t.c in ('aa','bb','cc')",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -16764,7 +18417,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2 and t2.c in ('aa','bb','cc')"
}
@@ -16853,12 +18508,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 32,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a > 2 and t.c in ('aa','bb','cc')",
"materialized": {
@@ -16868,6 +18526,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -16883,7 +18542,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2 and t2.c in ('aa','bb','cc')"
}
@@ -16897,6 +18558,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -16912,7 +18574,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2"
}
@@ -16961,12 +18625,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c in ('aa','bb','cc') and t1.a is not null and t1.c is not null"
}
@@ -16980,11 +18647,14 @@ EXPLAIN
"key_length": "24",
"used_key_parts": ["a", "c"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 8,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -16999,7 +18669,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c in ('aa','bb','cc')"
}
@@ -17101,12 +18773,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 48,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a > 2 and t.c in ('aa','bb','cc')",
"materialized": {
@@ -17116,6 +18791,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -17131,7 +18807,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2 and t2.c in ('aa','bb','cc')"
}
@@ -17145,6 +18823,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -17160,7 +18839,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2"
}
@@ -17174,6 +18855,7 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"having_condition": "t2.c in ('aa','bb','cc')",
"filesort": {
"sort_key": "t2.a",
@@ -17184,7 +18866,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2"
}
@@ -17250,17 +18934,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a > 2 and t.c in ('aa','bb','cc')",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -17276,7 +18964,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2 and t2.c in ('aa','bb','cc')"
}
@@ -17338,17 +19028,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a > 2 and t.c in ('aa','bb','cc')",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -17364,7 +19058,9 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 80,
"attached_condition": "t2.a > 2"
}
@@ -17426,17 +19122,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t.a > 2 and t.c in ('aa','bb','cc')",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -17456,7 +19156,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 20,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.c in ('aa','bb','cc')"
}
@@ -17528,7 +19230,7 @@ on t1.a=t.a
where t1.b < 3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 range idx_b idx_b 5 NULL 4 100.00 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 100.00
2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`c`) AS `m` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` < 3
@@ -17541,6 +19243,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -17550,7 +19253,9 @@ EXPLAIN
"key": "idx_b",
"key_length": "5",
"used_key_parts": ["b"],
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.b < 3",
"attached_condition": "t1.a is not null"
@@ -17565,12 +19270,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 4,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t1.a is not null",
"nested_loop": [
{
@@ -17582,7 +19290,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -17649,7 +19359,7 @@ on t1.a=t.a
where t1.b <= 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL idx_b NULL NULL NULL 12 83.33 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 100.00
2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`b`) AS `m` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <= 5
@@ -17662,13 +19372,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
"possible_keys": ["idx_b"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 83.33333588,
"attached_condition": "t1.b <= 5 and t1.a is not null"
}
@@ -17682,12 +19395,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "loops": 10,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t1.a is not null",
"nested_loop": [
{
@@ -17699,7 +19415,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -17764,10 +19482,10 @@ from t1 left join
on t1.a=t.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 12 100.00
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 9 100.00 Using where
-2 DERIVED t2 ALL idx_a NULL NULL NULL 90 100.00 Using temporary; Using filesort
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 100.00 Using where
+2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 100.00
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t1` left join (/* select#2 */ select `test`.`t2`.`a` AS `a`,max(`test`.`t2`.`b`) AS `max`,min(`test`.`t2`.`b`) AS `min` from `test`.`t2` group by `test`.`t2`.`a`) `t` on(`t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` is not null) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t1` left join (/* select#2 */ select `test`.`t2`.`a` AS `a`,max(`test`.`t2`.`b`) AS `max`,min(`test`.`t2`.`b`) AS `min` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` on(`t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` is not null) where 1
explain format=json select t1.a,t.max,t.min
from t1 left join
(select a, max(t2.b) max, min(t2.b) min from t2 group by t2.a) t
@@ -17776,13 +19494,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -17795,28 +19516,34 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 9,
+ "loops": 12,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(trigcond(t1.a is not null))",
"materialized": {
+ "lateral": 1,
"query_block": {
"select_id": 2,
- "filesort": {
- "sort_key": "t2.a",
- "temporary_table": {
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "possible_keys": ["idx_a"],
- "rows": 90,
- "filtered": 100
- }
- }
- ]
+ "cost": "COST_REPLACED",
+ "outer_ref_condition": "t1.a is not null",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["idx_a"],
+ "key": "idx_a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.a"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 100
+ }
}
- }
+ ]
}
}
}
@@ -17865,7 +19592,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 range idx_b idx_b 5 NULL 2 100.00 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`a` = `test`.`t3`.`a` and `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`a`,`test`.`t4`.`c`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` > 15
@@ -17878,6 +19605,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -17887,7 +19615,9 @@ EXPLAIN
"key": "idx_b",
"key_length": "5",
"used_key_parts": ["b"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.b > 15",
"attached_condition": "t3.a is not null and t3.c is not null"
@@ -17902,12 +19632,15 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t3.a is not null and t3.c is not null",
"nested_loop": [
{
@@ -17919,7 +19652,9 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -17954,10 +19689,10 @@ on t3.a=t.a and t3.c=t.c
where t3.b <= 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 83.33 Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 4 100.00
-2 DERIVED t4 ALL idx NULL NULL NULL 40 100.00 Using temporary; Using filesort
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
+2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
-Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` group by `test`.`t4`.`a`,`test`.`t4`.`c`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` <= 15
+Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`a` = `test`.`t3`.`a` and `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`a`,`test`.`t4`.`c`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` <= 15
explain format=json select t3.a,t3.c,t.max,t.min
from t3 join
(select a, c, max(b) max, min(b) min from t4 group by a,c) t
@@ -17967,13 +19702,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
"possible_keys": ["idx_b"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 83.33333588,
"attached_condition": "t3.b <= 15 and t3.a is not null and t3.c is not null"
}
@@ -17987,27 +19725,33 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 4,
+ "loops": 10,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
+ "lateral": 1,
"query_block": {
"select_id": 2,
- "filesort": {
- "sort_key": "t4.a, t4.c",
- "temporary_table": {
- "nested_loop": [
- {
- "table": {
- "table_name": "t4",
- "access_type": "ALL",
- "possible_keys": ["idx"],
- "rows": 40,
- "filtered": 100
- }
- }
- ]
+ "cost": "COST_REPLACED",
+ "outer_ref_condition": "t3.a is not null and t3.c is not null",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t4",
+ "access_type": "ref",
+ "possible_keys": ["idx"],
+ "key": "idx",
+ "key_length": "133",
+ "used_key_parts": ["a", "c"],
+ "ref": ["test.t3.a", "test.t3.c"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 100
+ }
}
- }
+ ]
}
}
}
@@ -18038,7 +19782,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 range idx_b idx_b 5 NULL 2 100.00 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`a` = `test`.`t3`.`a` and `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`c`,`test`.`t4`.`a`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` > 15
@@ -18051,6 +19795,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -18060,7 +19805,9 @@ EXPLAIN
"key": "idx_b",
"key_length": "5",
"used_key_parts": ["b"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.b > 15",
"attached_condition": "t3.a is not null and t3.c is not null"
@@ -18075,12 +19822,15 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t3.a is not null and t3.c is not null",
"nested_loop": [
{
@@ -18092,7 +19842,9 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -18127,10 +19879,10 @@ on t3.a=t.a and t3.c=t.c
where t3.b <= 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 83.33 Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 4 100.00
-2 DERIVED t4 ALL idx NULL NULL NULL 40 100.00 Using temporary; Using filesort
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1 100.00
+2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
-Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` group by `test`.`t4`.`c`,`test`.`t4`.`a`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` <= 15
+Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`c` AS `c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t3` join (/* select#2 */ select `test`.`t4`.`a` AS `a`,`test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`a` = `test`.`t3`.`a` and `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`c`,`test`.`t4`.`a`) `t` where `t`.`a` = `test`.`t3`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t3`.`b` <= 15
explain format=json select t3.a,t3.c,t.max,t.min
from t3 join
(select a, c, max(b) max, min(b) min from t4 group by c,a) t
@@ -18140,13 +19892,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
"possible_keys": ["idx_b"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 83.33333588,
"attached_condition": "t3.b <= 15 and t3.a is not null and t3.c is not null"
}
@@ -18160,27 +19915,33 @@ EXPLAIN
"key_length": "133",
"used_key_parts": ["a", "c"],
"ref": ["test.t3.a", "test.t3.c"],
- "rows": 4,
+ "loops": 10,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
+ "lateral": 1,
"query_block": {
"select_id": 2,
- "filesort": {
- "sort_key": "t4.c, t4.a",
- "temporary_table": {
- "nested_loop": [
- {
- "table": {
- "table_name": "t4",
- "access_type": "ALL",
- "possible_keys": ["idx"],
- "rows": 40,
- "filtered": 100
- }
- }
- ]
+ "cost": "COST_REPLACED",
+ "outer_ref_condition": "t3.a is not null and t3.c is not null",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t4",
+ "access_type": "ref",
+ "possible_keys": ["idx"],
+ "key": "idx",
+ "key_length": "133",
+ "used_key_parts": ["a", "c"],
+ "ref": ["test.t3.a", "test.t3.c"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 100
+ }
}
- }
+ ]
}
}
}
@@ -18224,7 +19985,7 @@ where t2.b between 80 and 85 and t2.c in ('y','z') and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 range idx idx 133 NULL 2 100.00 Using index condition; Using where
1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 1 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 2 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`t`.`c` AS `t_c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t2` join `test`.`t3` join (/* select#2 */ select `test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`c`) `t` where `test`.`t3`.`a` = `test`.`t2`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t2`.`b` between 80 and 85 and `test`.`t2`.`c` in ('y','z')
@@ -18235,6 +19996,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -18244,7 +20006,9 @@ EXPLAIN
"key": "idx",
"key_length": "133",
"used_key_parts": ["c", "b"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t2.b between 80 and 85 and t2.c in ('y','z')",
"attached_condition": "t2.a is not null"
@@ -18259,7 +20023,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.c is not null"
}
@@ -18273,12 +20039,15 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
- "rows": 2,
+ "loops": 3,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t3.c is not null",
"nested_loop": [
{
@@ -18290,7 +20059,9 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -18395,12 +20166,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 90,
+ "cost": "COST_REPLACED",
"filtered": 60,
"attached_condition": "t2.b < 40 and t2.a is not null"
}
@@ -18414,7 +20188,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 53.99999991,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.c is not null"
}
@@ -18428,11 +20204,14 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
+ "loops": 80.99999987,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t4.c",
"temporary_table": {
@@ -18442,7 +20221,9 @@ EXPLAIN
"table_name": "t4",
"access_type": "ALL",
"possible_keys": ["idx_c"],
+ "loops": 1,
"rows": 160,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -18496,7 +20277,7 @@ where t2.b between 80 and 85 and t2.c in ('y','z') and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 range idx idx 133 NULL 2 100.00 Using index condition; Using where
1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 1 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 2 100.00
+1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 1 100.00
2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 2 100.00 Using temporary
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t3`.`c` AS `c`,`t`.`c` AS `c`,`t`.`b` AS `b`,`t`.`sum(b) over (partition by c)` AS `sum(b) over (partition by c)` from `test`.`t2` join `test`.`t3` join (/* select#2 */ select `test`.`t4`.`c` AS `c`,`test`.`t4`.`b` AS `b`,sum(`test`.`t4`.`b`) over ( partition by `test`.`t4`.`c`) AS `sum(b) over (partition by c)` from `test`.`t4` where `test`.`t4`.`c` = `test`.`t3`.`c`) `t` where `test`.`t3`.`a` = `test`.`t2`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t2`.`b` between 80 and 85 and `test`.`t2`.`c` in ('y','z')
@@ -18507,6 +20288,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -18516,7 +20298,9 @@ EXPLAIN
"key": "idx",
"key_length": "133",
"used_key_parts": ["c", "b"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t2.b between 80 and 85 and t2.c in ('y','z')",
"attached_condition": "t2.a is not null"
@@ -18531,7 +20315,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.c is not null"
}
@@ -18545,12 +20331,15 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
- "rows": 2,
+ "loops": 3,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t3.c is not null",
"window_functions_computation": {
"sorts": [
@@ -18571,7 +20360,9 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -18918,12 +20709,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 90,
+ "cost": "COST_REPLACED",
"filtered": 60,
"attached_condition": "t2.b < 40 and t2.a is not null"
}
@@ -18937,7 +20731,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
+ "loops": 53.99999991,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.c is not null"
}
@@ -18951,11 +20747,14 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
+ "loops": 80.99999987,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -18971,7 +20770,9 @@ EXPLAIN
"table_name": "t4",
"access_type": "ALL",
"possible_keys": ["idx_c"],
+ "loops": 1,
"rows": 160,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -19023,7 +20824,7 @@ a c
explain extended SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 ) and a < 2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t4 range a a 5 NULL 1 100.00 Using index condition; Using where
-1 PRIMARY <derived3> ref key0 key0 128 test.t4.c 2 100.00 FirstMatch(t4)
+1 PRIMARY <derived3> ref key0 key0 128 test.t4.c 1 100.00 FirstMatch(t4)
3 LATERAL DERIVED t3 ref c c 128 test.t4.c 2 100.00
3 LATERAL DERIVED <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
4 MATERIALIZED t1 ALL NULL NULL NULL NULL 3 100.00
@@ -19035,6 +20836,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -19044,7 +20846,9 @@ EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t4.a < 2",
"attached_condition": "t4.c is not null"
@@ -19059,13 +20863,16 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t4.c"],
- "rows": 2,
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"first_match": "t4",
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"outer_ref_condition": "t4.c is not null",
"nested_loop": [
@@ -19078,7 +20885,9 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t4.c"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -19102,7 +20911,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -19111,7 +20922,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 3,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -19187,7 +21000,7 @@ DROP TABLE t1;
CREATE TABLE t1 (pk1 INT PRIMARY KEY, f INT) ENGINE=Aria;
INSERT INTO t1 VALUES (1,0),(2,0);
CREATE TABLE t2 (pk2 INT PRIMARY KEY) ENGINE=Aria;
-INSERT INTO t2 VALUES (1),(2),(3);
+INSERT INTO t2 VALUES (1),(2),(3),(11),(12),(13);
CREATE VIEW v2 AS SELECT pk2, COUNT(*) AS cnt FROM t2 GROUP BY pk2;
SELECT * FROM t1 INNER JOIN v2 ON pk1 = pk2 WHERE f <> 5;
pk1 f pk2 cnt
@@ -19196,7 +21009,7 @@ pk1 f pk2 cnt
EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN v2 ON pk1 = pk2 WHERE f <> 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 2 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 4 test.t1.pk1 2 100.00
+1 PRIMARY <derived2> ref key0 key0 4 test.t1.pk1 1 100.00
2 LATERAL DERIVED t2 eq_ref PRIMARY PRIMARY 4 test.t1.pk1 1 100.00 Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`f` AS `f`,`v2`.`pk2` AS `pk2`,`v2`.`cnt` AS `cnt` from `test`.`t1` join `test`.`v2` where `v2`.`pk2` = `test`.`t1`.`pk1` and `test`.`t1`.`f` <> 5
@@ -19205,13 +21018,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
"possible_keys": ["PRIMARY"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f <> 5"
}
@@ -19225,12 +21041,15 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk2"],
"ref": ["test.t1.pk1"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -19241,7 +21060,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk2"],
"ref": ["test.t1.pk1"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index": true
}
@@ -19414,7 +21235,7 @@ GROUP BY t1.b,t2.c) dt
WHERE t3.d = dt.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 5 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t3.d 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.d 1 100.00
2 LATERAL DERIVED t1 ref idx_b idx_b 5 test.t3.d 1 100.00 Using index; Using temporary; Using filesort
2 LATERAL DERIVED t2 ALL NULL NULL NULL NULL 5 100.00 Using join buffer (flat, BNL join)
Warnings:
@@ -19449,7 +21270,7 @@ left join
on u.id=auditlastlogin.userid;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY u ALL NULL NULL NULL NULL 2
-1 PRIMARY <derived2> ref key0 key0 5 test.u.id 2
+1 PRIMARY <derived2> ref key0 key0 5 test.u.id 1
2 DERIVED au ALL NULL NULL NULL NULL 4 Using temporary; Using filesort
select * from t1 as u
left join
@@ -19504,9 +21325,9 @@ id a
explain extended select id, a from t1 where id in (select id from v1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 20 100.00
-1 PRIMARY <derived3> ref key0 key0 4 test.t1.id 2 100.00 FirstMatch(t1)
-3 LATERAL DERIVED t1 eq_ref PRIMARY PRIMARY 4 test.t1.id 1 100.00
-3 LATERAL DERIVED t2 ref ro_id ro_id 4 test.t1.id 1 100.00 Using where
+1 PRIMARY <derived3> ref key0 key0 4 test.t1.id 2 50.00 FirstMatch(t1)
+3 DERIVED t1 ALL PRIMARY NULL NULL NULL 20 100.00 Using temporary; Using filesort
+3 DERIVED t2 ref ro_id ro_id 4 test.t1.id 1 100.00 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`v1`) where `v1`.`id` = `test`.`t1`.`id`
select id, a from t1
@@ -19542,11 +21363,11 @@ on (t1.id = t2.ro_id AND t2.flag = 1)
group by t1.id) dt);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 20 100.00
-1 PRIMARY <derived3> ref key0 key0 4 test.t1.id 2 100.00 FirstMatch(t1)
-3 LATERAL DERIVED t1 eq_ref PRIMARY PRIMARY 4 test.t1.id 1 100.00
-3 LATERAL DERIVED t2 ref ro_id ro_id 4 test.t1.id 1 100.00 Using where
+1 PRIMARY <derived3> ref key1,distinct_key key1 4 test.t1.id 2 50.00 FirstMatch(t1)
+3 DERIVED t1 ALL PRIMARY NULL NULL NULL 20 100.00 Using temporary; Using filesort
+3 DERIVED t2 ref ro_id ro_id 4 test.t1.id 1 100.00 Using where
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`a` AS `a` from `test`.`t1` semi join ((/* select#3 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`a` AS `a` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`ro_id` = `test`.`t1`.`id` and `test`.`t2`.`flag` = 1) where `test`.`t1`.`id` = `test`.`t1`.`id` group by `test`.`t1`.`id`) `dt`) where `dt`.`id` = `test`.`t1`.`id`
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`a` AS `a` from `test`.`t1` semi join ((/* select#3 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`a` AS `a` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`ro_id` = `test`.`t1`.`id` and `test`.`t2`.`flag` = 1) where 1 group by `test`.`t1`.`id`) `dt`) where `dt`.`id` = `test`.`t1`.`id`
drop view v1;
drop table t1,t2;
#
@@ -19648,21 +21469,28 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
- "access_type": "ALL",
+ "access_type": "const",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["const"],
+ "loops": 1,
"r_loops": 1,
- "rows": 4,
- "r_rows": 2,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
- "r_filtered": 50,
- "attached_condition": "v1.a = 3",
+ "r_filtered": 100,
"materialized": {
"query_block": {
"union_result": {
@@ -19674,6 +21502,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -19681,9 +21510,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -19724,7 +21555,8 @@ create table t1 (id int, a int, index (a), index (id, a)) engine=myisam;
insert into t1 values
(17,1),(17,3010),(17,3013),(17,3053),(21,2446),(21,2467),(21,2);
create table t2 (a int) engine=myisam;
-insert into t2 values (1),(2),(3);
+insert into t2 values (1),(2),(3),(1000),(2000),(3000);
+insert into t2 select 5000 from seq_5000_to_6000;
create table t3 (id int) engine=myisam;
insert into t3 values (1),(2);
analyze table t1,t2,t3;
@@ -19746,22 +21578,25 @@ where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
1 PRIMARY t1 ref a a 5 test.t3.id 1
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.id 1
1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY <derived2> ref key0 key0 5 test.t3.id 2
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
-2 DERIVED cp2 range NULL a 5 NULL 8 Using index for group-by
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 1007
+2 DERIVED cp2 range NULL a 5 NULL 7 Using index for group-by
explain format=json select * from t1, (select a from t1 cp2 group by a) dt, t3
where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.`id` is not null and t3.`id` is not null"
}
@@ -19775,32 +21610,42 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.id"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
{
"table": {
- "table_name": "<subquery3>",
- "access_type": "eq_ref",
- "possible_keys": ["distinct_key"],
- "key": "distinct_key",
- "key_length": "4",
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
"used_key_parts": ["a"],
- "ref": ["func"],
+ "ref": ["test.t3.id"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
- "unique": 1,
"query_block": {
- "select_id": 3,
+ "select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
- "table_name": "t2",
- "access_type": "ALL",
- "rows": 3,
- "filtered": 100
+ "table_name": "cp2",
+ "access_type": "range",
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "loops": 1,
+ "rows": 7,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "using_index_for_group_by": true
}
}
]
@@ -19810,29 +21655,28 @@ EXPLAIN
},
{
"table": {
- "table_name": "<derived2>",
- "access_type": "ref",
- "possible_keys": ["key0"],
- "key": "key0",
- "key_length": "5",
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "4",
"used_key_parts": ["a"],
- "ref": ["test.t3.id"],
- "rows": 2,
+ "ref": ["func"],
+ "rows": 1,
"filtered": 100,
"materialized": {
+ "unique": 1,
"query_block": {
- "select_id": 2,
+ "select_id": 3,
"nested_loop": [
{
"table": {
- "table_name": "cp2",
- "access_type": "range",
- "key": "a",
- "key_length": "5",
- "used_key_parts": ["a"],
- "rows": 8,
- "filtered": 100,
- "using_index_for_group_by": true
+ "table_name": "t2",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 1007,
+ "cost": "COST_REPLACED",
+ "filtered": 100
}
}
]
@@ -19854,9 +21698,9 @@ where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
1 PRIMARY t1 ref a a 5 test.t3.id 1
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.id 1
1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY <derived2> ref key0 key0 5 test.t3.id 2
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 1007
2 LATERAL DERIVED cp2 ref a a 5 test.t1.a 1 Using where; Using index
explain format=json select * from t1, (select a from t1 cp2 group by a) dt, t3
where dt.a = t1.a and t1.a = t3.id and t1.a in (select a from t2);
@@ -19864,12 +21708,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.`id` is not null and t3.`id` is not null"
}
@@ -19883,41 +21730,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.id"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
{
"table": {
- "table_name": "<subquery3>",
- "access_type": "eq_ref",
- "possible_keys": ["distinct_key"],
- "key": "distinct_key",
- "key_length": "4",
- "used_key_parts": ["a"],
- "ref": ["func"],
- "rows": 1,
- "filtered": 100,
- "materialized": {
- "unique": 1,
- "query_block": {
- "select_id": 3,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "rows": 3,
- "filtered": 100
- }
- }
- ]
- }
- }
- }
- },
- {
- "table": {
"table_name": "<derived2>",
"access_type": "ref",
"possible_keys": ["key0"],
@@ -19925,12 +21745,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.id"],
- "rows": 2,
+ "loops": 2,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t1.a is not null",
"nested_loop": [
{
@@ -19942,7 +21765,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "cp2.a = t3.`id`",
"using_index": true
@@ -19952,6 +21777,37 @@ EXPLAIN
}
}
}
+ },
+ {
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "4",
+ "used_key_parts": ["a"],
+ "ref": ["func"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 1007,
+ "cost": "COST_REPLACED",
+ "filtered": 100
+ }
+ }
+ ]
+ }
+ }
+ }
}
]
}
@@ -19969,7 +21825,7 @@ id a a id
deallocate prepare stmt;
drop table t1,t2,t3;
#
-# MDEV-MDEV-27132: Splittable derived with equality in WHERE
+# MDEV-27132: Splittable derived with equality in WHERE
#
CREATE TABLE t1 (
id int PRIMARY KEY
@@ -20010,6 +21866,7 @@ INSERT INTO t2(deleted, t1_id, email, reporting_person)
SELECT deleted, t1_id+80000, email, reporting_person FROM t2;
INSERT INTO t2(deleted, t1_id, email, reporting_person)
SELECT deleted, t1_id+160000, email, reporting_person FROM t2;
+insert into t2 (id,t1_id) select -seq,-seq from seq_1_to_1000;
CREATE TABLE t3 (
id int PRIMARY KEY,
deleted int,
@@ -20062,8 +21919,8 @@ WHERE t1.id BETWEEN 200 AND 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 range t1_id t1_id 5 NULL 47 Using where; Using index
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.t1_id 1 Using index
-1 PRIMARY <derived2> ref key0 key0 5 test.t3.t1_id 2
-2 LATERAL DERIVED t2 ref t1_id t1_id 5 test.t1.id 3 Using index condition; Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.t1_id 1
+2 LATERAL DERIVED t2 ref t1_id t1_id 5 test.t1.id 1 Using index condition; Using where
EXPLAIN FORMAT=JSON SELECT t1.id
FROM t1
JOIN t3
@@ -20075,6 +21932,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -20084,7 +21942,9 @@ EXPLAIN
"key": "t1_id",
"key_length": "5",
"used_key_parts": ["t1_id"],
+ "loops": 1,
"rows": 47,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.t1_id between 200 and 100000 and t3.t1_id is not null",
"using_index": true
@@ -20099,7 +21959,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["id"],
"ref": ["test.t3.t1_id"],
+ "loops": 47,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index": true
}
@@ -20113,12 +21975,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["t1_id"],
"ref": ["test.t3.t1_id"],
- "rows": 2,
+ "loops": 47,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -20129,8 +21994,10 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["t1_id"],
"ref": ["test.t1.id"],
- "rows": 3,
- "filtered": 59.09090805,
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 34.55045319,
"index_condition": "t2.t1_id between 200 and 100000 and t2.t1_id = t3.t1_id",
"attached_condition": "t2.reporting_person = 1"
}
@@ -20152,6 +22019,18 @@ JOIN (SELECT t1_id FROM t2 WHERE reporting_person = 1 GROUP BY t1_id) tx
ON tx.t1_id = t1.id
WHERE t1.id BETWEEN 200 AND 100000;
id
+EXPLAIN SELECT t1.id
+FROM t1
+JOIN t3
+ON t3.t1_id = t1.id
+JOIN (SELECT t1_id FROM t2 WHERE reporting_person = 1 GROUP BY t1_id) tx
+ON tx.t1_id = t1.id
+WHERE t1.id BETWEEN 200 AND 100000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 range t1_id t1_id 5 NULL 47 Using where; Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.t1_id 1 Using index
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.t1_id 10
+2 DERIVED t2 ALL t1_id NULL NULL NULL 2408 Using where; Using temporary; Using filesort
set optimizer_switch='split_materialized=default';
DROP TABLE t1,t2,t3;
#
@@ -20274,8 +22153,8 @@ ON from_agg_items.charge_id = charges.id AND
from_agg_items.ledger_id = charges.from_ledger_id
WHERE charges.to_ledger_id = 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY charges ALL PRIMARY,fk_charge_from_ledger,fk_charge_to_ledger NULL NULL NULL 20 Using where
-1 PRIMARY <derived2> ref key0 key0 17 test.charges.from_ledger_id,test.charges.id 2
+1 PRIMARY charges ref PRIMARY,fk_charge_from_ledger,fk_charge_to_ledger fk_charge_to_ledger 8 const 8
+1 PRIMARY <derived2> ref key0 key0 17 test.charges.from_ledger_id,test.charges.id 1
2 LATERAL DERIVED transaction_items ref fk_items_transaction,fk_items_charge fk_items_charge 9 test.charges.id 2
2 LATERAL DERIVED transactions eq_ref PRIMARY,fk_transactions_ledger PRIMARY 8 test.transaction_items.transaction_id 1 Using where
EXPLAIN FORMAT=JSON SELECT
@@ -20300,19 +22179,25 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "charges",
- "access_type": "ALL",
+ "access_type": "ref",
"possible_keys": [
"PRIMARY",
"fk_charge_from_ledger",
"fk_charge_to_ledger"
],
- "rows": 20,
- "filtered": 40,
- "attached_condition": "charges.to_ledger_id = 2"
+ "key": "fk_charge_to_ledger",
+ "key_length": "8",
+ "used_key_parts": ["to_ledger_id"],
+ "ref": ["const"],
+ "loops": 1,
+ "rows": 8,
+ "cost": "COST_REPLACED",
+ "filtered": 100
}
},
{
@@ -20324,12 +22209,15 @@ EXPLAIN
"key_length": "17",
"used_key_parts": ["ledger_id", "charge_id"],
"ref": ["test.charges.from_ledger_id", "test.charges.id"],
- "rows": 2,
+ "loops": 8,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -20340,7 +22228,9 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["charge_id"],
"ref": ["test.charges.id"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -20353,7 +22243,9 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["id"],
"ref": ["test.transaction_items.transaction_id"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "transactions.ledger_id = charges.from_ledger_id"
}
@@ -20413,7 +22305,7 @@ ON from_agg_items.charge_id = charges.id AND
from_agg_items.ledger_id = charges.from_ledger_id
WHERE charges.to_ledger_id = 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY charges ALL PRIMARY,fk_charge_from_ledger,fk_charge_to_ledger NULL NULL NULL 20 Using where
+1 PRIMARY charges ref PRIMARY,fk_charge_from_ledger,fk_charge_to_ledger fk_charge_to_ledger 8 const 8
1 PRIMARY <derived2> ref key0 key0 17 test.charges.from_ledger_id,test.charges.id 4
2 DERIVED transaction_items ALL fk_items_transaction NULL NULL NULL 40 Using temporary; Using filesort
2 DERIVED transactions eq_ref PRIMARY PRIMARY 8 test.transaction_items.transaction_id 1
@@ -20468,8 +22360,8 @@ ON from_agg_items.charge_id = charges.id AND
from_agg_items.ledger_id = charges.from_ledger_id
WHERE charges.to_ledger_id = 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY charges ALL fk_charge_to_ledger NULL NULL NULL 20 Using where
-1 PRIMARY <derived2> ref key0 key0 18 test.charges.from_ledger_id,test.charges.id 2
+1 PRIMARY charges ref fk_charge_to_ledger fk_charge_to_ledger 8 const 10
+1 PRIMARY <derived2> ref key0 key0 18 test.charges.from_ledger_id,test.charges.id 1
2 LATERAL DERIVED transaction_items ref fk_items_transaction,fk_items_charge fk_items_charge 9 test.charges.id 2
2 LATERAL DERIVED transactions eq_ref PRIMARY,fk_transactions_ledger PRIMARY 8 test.transaction_items.transaction_id 1 Using where
EXPLAIN FORMAT=JSON SELECT
@@ -20494,15 +22386,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "charges",
- "access_type": "ALL",
+ "access_type": "ref",
"possible_keys": ["fk_charge_to_ledger"],
- "rows": 20,
- "filtered": 50,
- "attached_condition": "charges.to_ledger_id = 2"
+ "key": "fk_charge_to_ledger",
+ "key_length": "8",
+ "used_key_parts": ["to_ledger_id"],
+ "ref": ["const"],
+ "loops": 1,
+ "rows": 10,
+ "cost": "COST_REPLACED",
+ "filtered": 100
}
},
{
@@ -20514,12 +22412,15 @@ EXPLAIN
"key_length": "18",
"used_key_parts": ["ledger_id", "charge_id"],
"ref": ["test.charges.from_ledger_id", "test.charges.id"],
- "rows": 2,
+ "loops": 10,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"lateral": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -20530,7 +22431,9 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["charge_id"],
"ref": ["test.charges.id"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -20543,7 +22446,9 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["id"],
"ref": ["test.transaction_items.transaction_id"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "transactions.ledger_id = charges.from_ledger_id"
}
@@ -20605,7 +22510,7 @@ ON from_agg_items.charge_id = charges.id AND
from_agg_items.ledger_id = charges.from_ledger_id
WHERE charges.to_ledger_id = 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY charges ALL fk_charge_to_ledger NULL NULL NULL 20 Using where
+1 PRIMARY charges ref fk_charge_to_ledger fk_charge_to_ledger 8 const 10
1 PRIMARY <derived2> ref key0 key0 18 test.charges.from_ledger_id,test.charges.id 4
2 DERIVED transaction_items ALL fk_items_transaction NULL NULL NULL 40 Using temporary; Using filesort
2 DERIVED transactions eq_ref PRIMARY PRIMARY 8 test.transaction_items.transaction_id 1
@@ -20672,7 +22577,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 const PRIMARY,oid PRIMARY 4 const 1
1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t2 const PRIMARY PRIMARY 4 const 1 Using index
-1 PRIMARY t4 const PRIMARY,a NULL NULL NULL 1 Impossible ON condition
+1 PRIMARY t4 const PRIMARY,a NULL NULL NULL 0 Impossible ON condition
1 PRIMARY <derived3> ref key0 key0 5 const 0 Using where
3 LATERAL DERIVED t5 ref id1 id1 5 const 0 Using index
DROP VIEW v1;
diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test
index dc454bf80de..b69bb93f627 100644
--- a/mysql-test/main/derived_cond_pushdown.test
+++ b/mysql-test/main/derived_cond_pushdown.test
@@ -112,6 +112,7 @@ let $query= select * from v1,t2 where (v1.max_c>214) and (t2.a>v1.a);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -122,6 +123,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into HAVING
@@ -131,6 +133,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -140,6 +143,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -147,12 +151,14 @@ let $query= select * from v1,t2 where (v1.a>6) and (t2.b>v1.b);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query= select * from v2,t2 where (v2.b>25) and (t2.a<v2.a);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into WHERE
@@ -162,6 +168,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -170,6 +177,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -179,6 +187,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformulas : pushing into HAVING and WHERE
@@ -187,6 +196,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -195,6 +205,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -203,6 +214,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into HAVING and WHERE
@@ -213,6 +225,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -220,6 +233,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formulas : pushing into WHERE and HAVING
@@ -229,6 +243,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -238,6 +253,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # prepare of a query containing extracted or formula
@@ -249,7 +265,9 @@ deallocate prepare stmt;
prepare stmt from
"explain format=json select * from v1,t2 where
((v1.max_c>400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a<v1.a));";
+--source include/explain-no-costs.inc
execute stmt;
+--source include/explain-no-costs.inc
execute stmt;
deallocate prepare stmt;
@@ -260,12 +278,14 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query= select * from v1,t2 where (v1.a=5) and (v1.max_c=t2.d);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformula : pushing into WHERE using equalities
@@ -273,6 +293,7 @@ let $query= select * from v1,t2 where (t2.a<5) and (v1.a=t2.a);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -280,6 +301,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformula : pushing into HAVING using equalities
@@ -287,6 +309,7 @@ let $query= select * from v1,t2 where (t2.c>150) and (v1.max_c=t2.c);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted and formula : pushing into WHERE
@@ -296,6 +319,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -303,6 +327,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -311,6 +336,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted and formula : pushing into WHERE using equalities
@@ -321,6 +347,7 @@ select * from v_decimal as v,t2_decimal as t where
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into HAVING using equalities
@@ -330,6 +357,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformulas : pushing into WHERE and HAVING using equalities
@@ -339,6 +367,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformulas : pushing into WHERE and HAVING
@@ -351,6 +380,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformulas : pushing into WHERE and HAVING
@@ -360,6 +390,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -371,6 +402,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -381,6 +413,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into WHERE
@@ -392,6 +425,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -402,6 +436,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # nothing to push
@@ -410,12 +445,14 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -424,6 +461,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -431,6 +469,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -439,6 +478,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : nothing to push
@@ -447,6 +487,7 @@ let $query= select * from v1,v2,t2 where
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -455,6 +496,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -463,6 +505,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : pushing in all tables
@@ -475,6 +518,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : pushing only in one table
@@ -486,6 +530,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : pushing only in one table
@@ -497,6 +542,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into WHERE
@@ -508,6 +554,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : pushing in all tables
@@ -521,6 +568,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : pushing in all tables
@@ -535,6 +583,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using several derived tables : pushing in all tables
@@ -553,6 +602,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted or formula : pushing into HAVING
@@ -565,6 +615,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # extracted and formula : pushing into WHERE
@@ -577,6 +628,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using query with union
@@ -589,6 +641,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using query with union
@@ -602,6 +655,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using query with union
@@ -616,6 +670,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using query with union
@@ -634,6 +689,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union
@@ -642,6 +698,7 @@ let $query= select * from v_union,t2 where (v_union.a<3) and (v_union.c>100);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union
@@ -653,6 +710,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union
@@ -665,6 +723,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
@@ -673,6 +732,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union
@@ -686,6 +746,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union
@@ -698,6 +759,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union of selects without aggregation
@@ -707,6 +769,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union of selects without aggregation
@@ -716,6 +779,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union of selects without aggregation
@@ -726,6 +790,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union of
@@ -736,6 +801,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using derived table with union of
@@ -750,6 +816,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded derived table : pushing the same conditions
@@ -762,6 +829,7 @@ select * from v4,v1 where
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : nothing to push
@@ -773,6 +841,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing different conditions
@@ -786,6 +855,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing different conditions
@@ -798,6 +868,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing different conditions
@@ -810,6 +881,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing different conditions
@@ -823,6 +895,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing the same conditions
@@ -836,6 +909,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing the same conditions
@@ -849,6 +923,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing the same conditions
@@ -863,6 +938,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using embedded view : pushing the same conditions
@@ -875,6 +951,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1,v2,v3,v4;
@@ -982,6 +1059,7 @@ SELECT * FROM t1 WHERE a IN (
)
);
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM t1 WHERE a IN (
SELECT b FROM v2 WHERE b < a OR b IN (
@@ -1004,6 +1082,7 @@ SELECT * FROM t1 WHERE a IN (
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
DROP VIEW v2,v3,v4;
@@ -1026,6 +1105,7 @@ SELECT * FROM
( SELECT * FROM t1
WHERE EXISTS ( SELECT * FROM v2 WHERE b = a ) ) AS sq;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM
( SELECT * FROM t1
@@ -1051,6 +1131,7 @@ SELECT * FROM t1 LEFT JOIN t2 ON a = b WHERE b IS NULL;
SELECT * FROM t1 LEFT JOIN v2 ON a = b WHERE b IS NULL;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM t1 LEFT JOIN v2 ON a = b WHERE b IS NULL;
@@ -1065,6 +1146,7 @@ CREATE TABLE t1 (i INT);
CREATE OR REPLACE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 VALUES (1),(2);
--enable_prepare_warnings
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM v1 WHERE i <= ANY ( SELECT 3 );
--disable_prepare_warnings
@@ -1092,12 +1174,14 @@ CREATE ALGORITHM=TEMPTABLE VIEW v2 AS SELECT * FROM t2;
SELECT * FROM v1 AS sq
WHERE b IN ( SELECT pk2 FROM v2 WHERE c > sq.b ) OR b = 100;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM v1 AS sq
WHERE b IN ( SELECT pk2 FROM v2 WHERE c > sq.b ) OR b = 100;
SELECT * FROM ( SELECT * FROM t1 ) AS sq
WHERE b IN ( SELECT pk2 FROM v2 WHERE c > sq.b ) OR b = 100;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT * FROM t1 ) AS sq
WHERE b IN ( SELECT pk2 FROM v2 WHERE c > sq.b ) OR b = 100;
@@ -1117,6 +1201,7 @@ INSERT INTO t2 VALUES (50);
CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
SELECT ( SELECT COUNT(*) FROM v1 WHERE a = t2.b ) AS f FROM t2 GROUP BY f;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT ( SELECT COUNT(*) FROM v1 WHERE a = t2.b ) AS f FROM t2 GROUP BY f;
@@ -1124,6 +1209,7 @@ CREATE TABLE t3 (a INT, b INT) ENGINE=MYISAM;
INSERT INTO t3 VALUES (1,10),(3,11),(2,10),(2,20),(3,21);
CREATE VIEW v2 AS SELECT a, sum(b) AS s FROM t3 GROUP BY a ;
SELECT ( SELECT COUNT(*) FROM v2 WHERE s < t2.b ) AS f FROM t2 GROUP BY f;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT ( SELECT COUNT(*) FROM v2 WHERE s < t2.b ) AS f FROM t2 GROUP BY f;
@@ -1145,6 +1231,7 @@ INSERT INTO t2 VALUES (5),(6);
SELECT a, GROUP_CONCAT(b) FROM v1
WHERE b IN ( SELECT COUNT(c) FROM t2 ) GROUP BY a;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT a, GROUP_CONCAT(b) FROM v1
WHERE b IN ( SELECT COUNT(c) FROM t2 ) GROUP BY a;
@@ -1163,6 +1250,7 @@ INSERT INTO t VALUES (1,1),(3,2);
SELECT * FROM v AS v1, v AS v2
WHERE v2.pk > v1.f AND v1.f IN ( SELECT COUNT(pk) FROM t );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM v AS v1, v AS v2
WHERE v2.pk > v1.f AND v1.f IN ( SELECT COUNT(pk) FROM t );
@@ -1182,6 +1270,7 @@ INSERT INTO t2 VALUES (3),(4);
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1200,6 +1289,7 @@ INSERT INTO t2 VALUES (3.2),(2.71);
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1214,6 +1304,7 @@ INSERT INTO t2 VALUES (3.21),(4.55);
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1228,6 +1319,7 @@ INSERT INTO t2 VALUES ('bbb'),('aa');
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1244,6 +1336,7 @@ INSERT INTO t2 VALUES
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1258,6 +1351,7 @@ INSERT INTO t2 VALUES ('2007-05-28'), ('2010-08-25');
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1272,6 +1366,7 @@ INSERT INTO t2 VALUES ('10:00:02'), ('11:00:10');
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM ( SELECT DISTINCT * FROM t1 ) AS sq
WHERE i IN ( SELECT MIN(j) FROM t2 );
@@ -1288,6 +1383,7 @@ CREATE OR REPLACE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 VALUES (2), (1);
SELECT * FROM v1 WHERE NULLIF(1, i);
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM v1 WHERE NULLIF(1, i);
@@ -1306,6 +1402,7 @@ CREATE TABLE t2 (c VARCHAR(3));
INSERT INTO t2 VALUES ('foo'),('xyz');
SELECT * FROM v1 WHERE v1.c IN ( SELECT MIN(c) FROM t2 WHERE 0 );
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM v1 WHERE v1.c IN ( SELECT MIN(c) FROM t2 WHERE 0 );
@@ -1340,7 +1437,9 @@ PREPARE stmt1 FROM
PREPARE stmt2 FROM
"EXPLAIN FORMAT=JSON
SELECT * FROM v1 WHERE 1 IN (SELECT a FROM t2) OR c = 'foo'";
+--source include/explain-no-costs.inc
EXECUTE stmt1;
+--source include/explain-no-costs.inc
EXECUTE stmt2;
INSERT INTO t2 SELECT a+1 FROM t2;
INSERT INTO t2 SELECT a+1 FROM t2;
@@ -1348,7 +1447,9 @@ INSERT INTO t2 SELECT a+1 FROM t2;
INSERT INTO t2 SELECT a+1 FROM t2;
INSERT INTO t2 SELECT a+1 FROM t2;
INSERT INTO t2 SELECT a+1 FROM t2;
+--source include/explain-no-costs.inc
EXECUTE stmt1;
+--source include/explain-no-costs.inc
EXECUTE stmt2;
DEALLOCATE PREPARE stmt1;
# the result here will change after the merge with the fix for mdev-11859
@@ -1485,6 +1586,7 @@ from ( select t1.a, v1.b, v1.s from t1, v1 where t1.a = v1.b ) as t
where b > 2;
eval $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
let $q2=
@@ -1493,6 +1595,7 @@ from ( select t1.a, v1.b, v1.s from t1, v1 where t1.a = v1.b ) as t
where a > 2;
eval $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
let $q3=
@@ -1501,6 +1604,7 @@ from ( select t1.a, v2.b, v2.c from t1, v2 where t1.a = v2.b ) as t
where a > 2;
eval $q3;
+--source include/explain-no-costs.inc
eval explain format=json $q3;
let $q4=
@@ -1509,6 +1613,7 @@ from ( select t1.a, v3.b, v3.m from t1, v3 where t1.a = v3.m ) as t
where a > 2;
eval $q4;
+--source include/explain-no-costs.inc
eval explain format=json $q4;
drop view v1,v2,v3;
@@ -1526,6 +1631,7 @@ let $q=
SELECT * FROM ( SELECT * FROM v1 ) AS sq WHERE f > 0;
eval $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
DROP VIEW v1;
@@ -1548,6 +1654,7 @@ SELECT * FROM t1, ( SELECT * FROM v2 ) AS sq
WHERE i1 = 1 AND ( i1 = i2 OR i1 = 2 );
eval $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
DROP VIEW v2;
@@ -1566,6 +1673,7 @@ from ( select distinct regexp_substr(t1.a,'^[A-Za-z]+') as f from t1) as t
where t.f = 'a' or t.f = 'b';
eval $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
drop table t1;
@@ -1589,6 +1697,7 @@ SELECT * FROM t1
WHERE (vc.ct>0);
eval $q;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
DROP TABLE t1,t2;
@@ -1619,6 +1728,7 @@ WHERE (v1.a=1) AND (v1.b=v1.a) AND
(v1.a=t2.x) AND (v1.max_c>30);
eval $query;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1634,6 +1744,7 @@ WHERE (v1.a=1) AND (v1.b=v1.a) AND (v1.b=v1.d) AND
(v1.a=t2.x) AND (v1.max_c>30);
eval $query;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
DROP TABLE t1,t2;
@@ -1676,6 +1787,7 @@ WHERE (t1.a,t1.b) IN
;
eval $query;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1694,6 +1806,7 @@ WHERE (t1.a,t1.b) IN
;
eval $query;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1712,6 +1825,7 @@ WHERE (t1.a,t1.b) IN
;
eval $query;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1730,6 +1844,7 @@ WHERE (t1.a,t1.b) IN
;
eval $query;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
DROP TABLE t1,t2;
@@ -1768,6 +1883,7 @@ SELECT * FROM
) dt
WHERE (dt.a=2);
eval $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1780,6 +1896,7 @@ SELECT * FROM
) dt
WHERE (dt.a>1);
eval $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1791,6 +1908,7 @@ SELECT * FROM
) dt
WHERE (dt.a='ab');
eval $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1802,6 +1920,7 @@ SELECT * FROM
) dt
WHERE (dt.a=1);
eval $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
DROP TABLE t1;
@@ -1830,6 +1949,7 @@ JOIN
) AS dt2
ON dt1.a = dt2.a;
eval $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
let $query=
@@ -1841,6 +1961,7 @@ SELECT * FROM
) AS dt, t1
WHERE dt.a=t1.a AND dt.a IN (1,t1.a);
eval $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
DROP TABLE t1;
@@ -1943,6 +2064,7 @@ let $q1=
INSERT INTO t3
SELECT * FROM (SELECT a, count(*) as c FROM t1 GROUP BY a) t WHERE a<=2;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q1;
eval $q1;
@@ -1952,6 +2074,7 @@ let $q2=
UPDATE t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t SET t2.a=t.c+10
WHERE t2.a= t.c and t.a>=3;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q2;
eval $q2;
@@ -1961,6 +2084,7 @@ let $q3=
DELETE t2 FROM t2, (SELECT a, count(*) as c FROM t1 GROUP BY a) t
WHERE t2.a= t.c+9 and t.a=2;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q3;
eval $q3;
@@ -1986,6 +2110,7 @@ FROM
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
LET $query=
@@ -1998,6 +2123,7 @@ FROM
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
LET $query=
@@ -2010,6 +2136,7 @@ FROM
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
LET $query=
@@ -2022,6 +2149,7 @@ FROM
) AS tab1
WHERE (tab1.max_a=1);
EVAL $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
DROP TABLE t1;
@@ -2061,6 +2189,7 @@ SELECT * FROM ( SELECT t1.f FROM v1 JOIN t1 ) AS t WHERE f IS NOT NULL;
eval $q1;
eval EXPLAIN $q1;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q1;
SELECT * FROM t1;
@@ -2073,6 +2202,7 @@ SELECT * FROM ( SELECT t1.f FROM v1 JOIN t1 ON v1.f=t1.f) AS t
WHERE f IS NOT NULL;
eval $q2;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q2;
SELECT * FROM t1;
@@ -2093,6 +2223,7 @@ CREATE VIEW v1 AS SELECT f2 FROM ( SELECT f2 FROM t1 ) AS t;
let $q1 =
UPDATE v1, t1 SET t1.f1 = 'z' WHERE v1.f2 < 2 AND t1.f2 = v1.f2;
eval $q1;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q1;
SELECT * FROM t1;
@@ -2101,6 +2232,7 @@ CREATE VIEW v2 AS SELECT f2 FROM ( SELECT DISTINCT f2 FROM t1 ) AS t;
let $q2 =
SELECT * FROM v2, t1 WHERE v2.f2 < 2 AND t1.f2 = v2.f2;
eval $q2;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q2;
DROP VIEW v1,v2;
@@ -2136,6 +2268,7 @@ CREATE TABLE t1 (a INT, b INT);
CREATE VIEW v1 AS SELECT a, MAX(b) FROM t1 GROUP BY a;
SELECT * FROM (SELECT 1 FROM v1 UNION (SELECT 1 FROM v1 WHERE @a := uuid())) dt;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT * FROM (SELECT 1 FROM v1 UNION (SELECT 1 FROM v1 WHERE @a := uuid())) dt;
@@ -2280,6 +2413,7 @@ select col2, col1 from v2;
explain select * from v3 where col1=123;
--echo # This must use ref accesses for reading table t1, not full scans:
+--source include/explain-no-costs.inc
explain format=json
select * from v3 where col1=123 and col2=321;
@@ -2303,6 +2437,7 @@ select a, f1(a), sum(b) from t1 group by a, f1(a);
--echo # "a > 1" will be pushed all the way to the table scan on t1
--echo # "a2>123" will be pushed into HAVING (as it refers to an SP call which
--echo # prevents pushing it to the WHERE)
+--source include/explain-no-costs.inc
explain format=json
select * from v2 where (s+1) > 10 AND a > 1 and a2>123;
@@ -2315,6 +2450,7 @@ insert into t4 select a,a,a from t1;
--echo # The subquery must be materialized and must have
--echo # "attached_condition": "t1.a + 1 > 10",
--echo # "having_condition": "`f1(a)` > 1 and `sum(b)` > 123",
+--source include/explain-no-costs.inc
explain format=json
select *
from t4
@@ -2359,6 +2495,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a<5);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2368,6 +2505,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a=8);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2376,6 +2514,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (t2.a=8);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2384,6 +2523,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.c>200);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2393,6 +2533,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a<5) and (v1.c>110);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2402,6 +2543,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2412,6 +2554,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2425,6 +2568,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using intersect in view definition
@@ -2456,6 +2600,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2473,6 +2618,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a<5);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2482,6 +2628,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a=6);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2490,6 +2637,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (t2.a=6);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2498,6 +2646,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.c>500);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2507,6 +2656,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a<5) and (v1.c>500);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2516,6 +2666,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2526,6 +2677,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2540,6 +2692,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--echo # using except in view definition
@@ -2571,6 +2724,7 @@ let $query=
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2591,6 +2745,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>5) and (v1.c>200);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2611,6 +2766,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<200);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2631,6 +2787,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>5) and (v1.c>200);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2651,6 +2808,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<200);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2671,6 +2829,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<150);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2691,6 +2850,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<130);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2714,6 +2874,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.c<130);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2736,6 +2897,7 @@ let $query= select * from v2,t2 where (v2.a=t2.a) and (v2.a>4) and (v2.c<150);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1,v2;
@@ -2758,6 +2920,7 @@ let $query= select * from v2,t2 where (v2.a=t2.a) and (v2.a>4) and (v2.c<150);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1,v2;
@@ -2776,6 +2939,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>4) and (v1.b>12) and
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2794,6 +2958,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a<2) and (v1.b<30) and
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2814,6 +2979,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and ((v1.a<2) or (v1.a<5)) and
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2835,6 +3001,7 @@ let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.a>1) and (v1.b > 12) a
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2858,6 +3025,7 @@ let $query= select * from v1,t2 where (v1.b=t2.b) and (v1.a<5);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2876,6 +3044,7 @@ let $query= select * from v1,t2 where (v1.b=t2.b) and (v1.a<4);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2894,6 +3063,7 @@ let $query= select * from v1,t2 where (v1.b=t2.b) and (v1.a<3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
drop view v1;
@@ -2930,6 +3100,7 @@ eval $no_pushdown $q1;
--sorted_result
eval $q1;
eval explain $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
let $q2=
@@ -2946,6 +3117,7 @@ eval $no_pushdown $q2;
--sorted_result
eval $q2;
eval explain $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
let $q3=
@@ -2956,6 +3128,7 @@ from (select a, c, sum(b) over (partition by a,c) as s from t2) as t, t1
eval $no_pushdown $q3;
eval $q3;
eval explain $q3;
+--source include/explain-no-costs.inc
eval explain format=json $q3;
let $q4=
@@ -2974,6 +3147,7 @@ eval $no_pushdown $q4;
--sorted_result
eval $q4;
eval explain $q4;
+--source include/explain-no-costs.inc
eval explain format=json $q4;
let $q5=
@@ -2988,6 +3162,7 @@ eval $no_pushdown $q5;
--sorted_result
eval $q5;
eval explain $q5;
+--source include/explain-no-costs.inc
eval explain format=json $q5;
let $q6=
@@ -3002,6 +3177,7 @@ eval $no_pushdown $q6;
--sorted_result
eval $q6;
eval explain $q6;
+--source include/explain-no-costs.inc
eval explain format=json $q6;
let $q7=
@@ -3016,6 +3192,7 @@ eval $no_pushdown $q7;
--sorted_result
eval $q7;
eval explain $q7;
+--source include/explain-no-costs.inc
eval explain format=json $q7;
drop table t1,t2;
@@ -3063,6 +3240,7 @@ where t1.b < 3;
eval $no_splitting $q1;
eval $q1;
eval explain extended $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
eval prepare stmt from "$q1";
execute stmt;
@@ -3079,6 +3257,7 @@ where t1.b <= 5;
eval $no_splitting $q10;
eval $q10;
eval explain extended $q10;
+--source include/explain-no-costs.inc
eval explain format=json $q10;
eval prepare stmt from "$q10";
execute stmt;
@@ -3096,6 +3275,7 @@ from t1 left join
eval $no_splitting $q2;
eval $q2;
eval explain extended $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
create table t3 (a int, b int, c char(127), index idx_b(b)) engine=myisam;
@@ -3125,6 +3305,7 @@ where t3.b > 15;
eval $no_splitting $q3;
eval $q3;
eval explain extended $q3;
+--source include/explain-no-costs.inc
eval explain format=json $q3;
let $q30=
@@ -3137,6 +3318,7 @@ where t3.b <= 15;
eval $no_splitting $q30;
eval $q30;
eval explain extended $q30;
+--source include/explain-no-costs.inc
eval explain format=json $q30;
let $q4=
@@ -3149,6 +3331,7 @@ where t3.b > 15;
eval $no_splitting $q4;
eval $q4;
eval explain extended $q4;
+--source include/explain-no-costs.inc
eval explain format=json $q4;
let $q40=
@@ -3161,6 +3344,7 @@ where t3.b <= 15;
eval $no_splitting $q40;
eval $q40;
eval explain extended $q40;
+--source include/explain-no-costs.inc
eval explain format=json $q40;
drop index idx_a on t2;
@@ -3184,6 +3368,7 @@ eval $no_splitting $q5;
--sorted_result
eval $q5;
eval explain extended $q5;
+--source include/explain-no-costs.inc
eval explain format=json $q5;
let $q50=
@@ -3194,6 +3379,7 @@ where t2.b < 40 and t2.a=t3.a and t3.c=t.c;
eval $no_splitting $q50;
eval $q50;
eval explain extended $q50;
+--source include/explain-no-costs.inc
eval explain format=json $q50;
let $q6=
@@ -3206,6 +3392,7 @@ eval $no_splitting $q6;
--sorted_result
eval $q6;
eval explain extended $q6;
+--source include/explain-no-costs.inc
eval explain format=json $q6;
let $q60=
@@ -3218,6 +3405,7 @@ eval $no_splitting $q60;
--sorted_result
eval $q60;
eval explain extended $q60;
+--source include/explain-no-costs.inc
eval explain format=json $q60;
drop table t1,t2,t3,t4;
@@ -3254,6 +3442,7 @@ SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 ) and a < 2;
eval $no_splitting $q1;
eval $q1;
eval explain extended $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
DROP VIEW v1;
@@ -3324,7 +3513,7 @@ CREATE TABLE t1 (pk1 INT PRIMARY KEY, f INT) ENGINE=Aria;
INSERT INTO t1 VALUES (1,0),(2,0);
CREATE TABLE t2 (pk2 INT PRIMARY KEY) ENGINE=Aria;
-INSERT INTO t2 VALUES (1),(2),(3);
+INSERT INTO t2 VALUES (1),(2),(3),(11),(12),(13);
CREATE VIEW v2 AS SELECT pk2, COUNT(*) AS cnt FROM t2 GROUP BY pk2;
@@ -3333,6 +3522,7 @@ SELECT * FROM t1 INNER JOIN v2 ON pk1 = pk2 WHERE f <> 5;
eval $q;
eval EXPLAIN EXTENDED $q;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
DROP VIEW v2;
@@ -3601,7 +3791,8 @@ insert into t1 values
(17,1),(17,3010),(17,3013),(17,3053),(21,2446),(21,2467),(21,2);
create table t2 (a int) engine=myisam;
-insert into t2 values (1),(2),(3);
+insert into t2 values (1),(2),(3),(1000),(2000),(3000);
+insert into t2 select 5000 from seq_5000_to_6000;
create table t3 (id int) engine=myisam;
insert into t3 values (1),(2);
@@ -3615,11 +3806,13 @@ select * from t1, (select a from t1 cp2 group by a) dt, t3
set optimizer_switch="split_materialized=off";
eval $q;
eval explain $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
set optimizer_switch="split_materialized=default";
eval $q;
eval explain $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
eval prepare stmt from "$q";
@@ -3630,7 +3823,7 @@ deallocate prepare stmt;
drop table t1,t2,t3;
--echo #
---echo # MDEV-MDEV-27132: Splittable derived with equality in WHERE
+--echo # MDEV-27132: Splittable derived with equality in WHERE
--echo #
CREATE TABLE t1 (
@@ -3677,6 +3870,9 @@ INSERT INTO t2(deleted, t1_id, email, reporting_person)
INSERT INTO t2(deleted, t1_id, email, reporting_person)
SELECT deleted, t1_id+160000, email, reporting_person FROM t2;
+insert into t2 (id,t1_id) select -seq,-seq from seq_1_to_1000;
+
+
CREATE TABLE t3 (
id int PRIMARY KEY,
deleted int,
@@ -3719,11 +3915,13 @@ set optimizer_switch='split_materialized=on';
eval $q;
eval EXPLAIN $q;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
set optimizer_switch='split_materialized=off';
eval $q;
+eval EXPLAIN $q;
set optimizer_switch='split_materialized=default';
@@ -3826,6 +4024,7 @@ WHERE charges.to_ledger_id = 2;
set optimizer_switch='split_materialized=on';
eval $q;
eval EXPLAIN $q;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
set optimizer_switch='split_materialized=off';
@@ -3858,6 +4057,7 @@ WHERE charges.to_ledger_id = 2;
set optimizer_switch='split_materialized=on';
eval $q1;
eval EXPLAIN $q1;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q1;
set optimizer_switch='split_materialized=off';
diff --git a/mysql-test/main/derived_opt.result b/mysql-test/main/derived_opt.result
index cf0c1cb617f..2f3b29c5049 100644
--- a/mysql-test/main/derived_opt.result
+++ b/mysql-test/main/derived_opt.result
@@ -92,13 +92,13 @@ pla_id test
explain SELECT STRAIGHT_JOIN d.pla_id, m2.mat_id FROM t1 m2 INNER JOIN (SELECT mp.pla_id, MIN(m1.matintnum) AS matintnum FROM t2 mp INNER JOIN t1 m1 ON mp.mat_id=m1.mat_id GROUP BY mp.pla_id) d ON d.matintnum=m2.matintnum;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY m2 ALL NULL NULL NULL NULL 9
-1 PRIMARY <derived2> ref key0 key0 7 test.m2.matintnum 2
+1 PRIMARY <derived2> ref key0 key0 7 test.m2.matintnum 1
2 DERIVED mp ALL NULL NULL NULL NULL 9 Using temporary; Using filesort
2 DERIVED m1 eq_ref PRIMARY PRIMARY 3 test.mp.mat_id 1
explain SELECT STRAIGHT_JOIN d.pla_id, m2.test FROM t1 m2 INNER JOIN (SELECT mp.pla_id, MIN(m1.matintnum) AS matintnum FROM t2 mp INNER JOIN t1 m1 ON mp.mat_id=m1.mat_id GROUP BY mp.pla_id) d ON d.matintnum=m2.matintnum;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY m2 ALL NULL NULL NULL NULL 9
-1 PRIMARY <derived2> ref key0 key0 7 test.m2.matintnum 2
+1 PRIMARY <derived2> ref key0 key0 7 test.m2.matintnum 1
2 DERIVED mp ALL NULL NULL NULL NULL 9 Using temporary; Using filesort
2 DERIVED m1 eq_ref PRIMARY PRIMARY 3 test.mp.mat_id 1
drop table t1,t2;
@@ -323,10 +323,9 @@ JOIN t1 AS tc ON (tb.pk = tc.pk)
JOIN t4 AS td ON tc.a = td.a) tu)
limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL #
+1 PRIMARY <derived3> ALL distinct_key NULL NULL NULL #
1 PRIMARY tx eq_ref PRIMARY PRIMARY 4 tu.pk # Using index
1 PRIMARY ty eq_ref PRIMARY PRIMARY 4 tu.pk # Using index
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL #
3 DERIVED td system PRIMARY NULL NULL NULL # Using temporary
3 DERIVED tc ref PRIMARY,a a 3 const #
3 DERIVED ta eq_ref PRIMARY PRIMARY 4 test.tc.pk # Using index
@@ -535,7 +534,7 @@ ON t2.id=t.id
WHERE t2.id < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 1
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using temporary; Using filesort
set join_cache_level=default;
set optimizer_switch= @save_optimizer_switch;
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index 74876836a53..9ea3d0f1396 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -20,7 +20,7 @@ EXPLAIN SELECT t1.n1 FROM t1, (SELECT n1, n2 FROM t1 WHERE c1 = 'a' GROUP BY n1)
WHERE t.n1 = t1.n1 AND t.n2 = t1.n2 AND c1 = 'a' GROUP BY n1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ref c1,n1_c1_n2 c1 1 const 2 Using index condition; Using where; Using temporary; Using filesort
-1 PRIMARY <derived2> ref key0 key0 8 test.t1.n1,test.t1.n2 2
+1 PRIMARY <derived2> ref key0 key0 8 test.t1.n1,test.t1.n2 1
2 LATERAL DERIVED t1 ref c1,n1_c1_n2 n1_c1_n2 4 test.t1.n1 1 Using where; Using index
SELECT t1.n1 FROM t1, (SELECT n1, n2 FROM t1 WHERE c1 = 'a' GROUP BY n1) as t
WHERE t.n1 = t1.n1 AND t.n2 = t1.n2 AND c1 = 'a' GROUP BY n1;
@@ -49,7 +49,7 @@ t2
WHERE t2.id2=t.id2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.id2 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.id2 1
2 DERIVED t3 ALL NULL NULL NULL NULL 1 Using where; Using temporary; Using filesort
2 DERIVED t1 eq_ref PRIMARY,id2 PRIMARY 4 test.t3.i3 1
2 DERIVED t2 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join)
@@ -99,7 +99,7 @@ ON t2.id=t.id
WHERE t2.id < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 1
2 LATERAL DERIVED t1 eq_ref PRIMARY PRIMARY 4 test.t2.id 1
set join_cache_level=default;
DROP TABLE t1,t2;
@@ -128,8 +128,8 @@ left join
(v1 join t1 as t on v1.f1=t.f1 and t.f2 = null)
on t1.f1=t.f1;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t const f2 NULL NULL NULL 1 Impossible ON condition
-1 PRIMARY <derived2> const key1 NULL NULL NULL 1 Impossible ON condition
+1 PRIMARY t const f2 NULL NULL NULL 0 Impossible ON condition
+1 PRIMARY <derived2> const key0,key1 NULL NULL NULL 0 Impossible ON condition
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
2 DERIVED t2 ALL PRIMARY NULL NULL NULL 3 Using temporary; Using filesort
set statement optimizer_switch='split_materialized=off' for explain select t.f2
@@ -138,8 +138,8 @@ left join
(v1 join t1 as t on v1.f1=t.f1 and t.f2 = null)
on t1.f1=t.f1;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t const f2 NULL NULL NULL 1 Impossible ON condition
-1 PRIMARY <derived3> const key1 NULL NULL NULL 1 Impossible ON condition
+1 PRIMARY t const f2 NULL NULL NULL 0 Impossible ON condition
+1 PRIMARY <derived3> const key0,key1 NULL NULL NULL 0 Impossible ON condition
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
3 DERIVED t2 index NULL PRIMARY 4 NULL 3
drop view v1;
@@ -157,26 +157,26 @@ set statement optimizer_switch='split_materialized=off' for EXPLAIN
SELECT *
FROM
t1 JOIN
-(SELECT t1.a, t1.b FROM t1, t2 WHERE t1.b = t2.c GROUP BY t1.a, t1.b) as dt
+(SELECT t1_inner.a, t1_inner.b FROM t1 as t1_inner, t2 as t2_inner WHERE t1_inner.b = t2_inner.c GROUP BY t1_inner.a, t1_inner.b) as dt
WHERE
t1.a = dt.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a,a_2 a_2 10 NULL 6 Using where; Using index
-1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2
-3 DERIVED t1 index NULL a_2 10 NULL 6 Using where; Using index
-3 DERIVED t2 ref c c 5 test.t1.b 1 Using index
+1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 1
+3 DERIVED t1_inner index NULL a_2 10 NULL 6 Using where; Using index
+3 DERIVED t2_inner ref c c 5 test.t1_inner.b 1 Using index
set statement optimizer_switch='split_materialized=on' for EXPLAIN
SELECT *
FROM
t1 JOIN
-(SELECT t1.a, t1.b FROM t1, t2 WHERE t1.b = t2.c GROUP BY t1.a, t1.b) as dt
+(SELECT t1_inner.a, t1_inner.b FROM t1 as t1_inner, t2 as t2_inner WHERE t1_inner.b = t2_inner.c GROUP BY t1_inner.a, t1_inner.b) as dt
WHERE
t1.a = dt.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a,a_2 a_2 10 NULL 6 Using where; Using index
-1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2
-3 LATERAL DERIVED t1 ref a,a_2 a 5 test.t1.a 1 Using where; Using temporary; Using filesort
-3 LATERAL DERIVED t2 ref c c 5 test.t1.b 1 Using index
+1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 1
+3 DERIVED t1_inner index a,a_2 a_2 10 NULL 6 Using where; Using index
+3 DERIVED t2_inner ref c c 5 test.t1_inner.b 1 Using index
DROP TABLE t1, t2;
#
# Bug mdev-25714: usage non-splitting covering index is cheaper than
@@ -208,8 +208,8 @@ t2
where t1.id = dt.id and t1.itemid = dt.itemid and t2.id=t1.itemid;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1
-1 PRIMARY <derived2> ref key1 key1 4 test.t2.id 2
-1 PRIMARY t1 ALL idx NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ref key1 key1 4 test.t2.id 1
+1 PRIMARY t1 ref idx idx 4 test.t2.id 3 Using where
2 DERIVED t3 ref idx1,idx2 idx1 4 const 5 Using where; Using index
select t1.id, t1.itemid, dt.id, t2.id
from t1,
@@ -227,8 +227,8 @@ t2
where t1.id = dt.id and t1.itemid = dt.itemid and t2.id=t1.itemid;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1
-1 PRIMARY <derived2> ref key1 key1 4 test.t2.id 2
-1 PRIMARY t1 ALL idx NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ref key1 key1 4 test.t2.id 1
+1 PRIMARY t1 ref idx idx 4 test.t2.id 3 Using where
2 DERIVED t3 ref idx1 idx1 4 const 5 Using where; Using index
select t1.id, t1.itemid, dt.id, t2.id
from t1,
@@ -273,7 +273,7 @@ on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 range idx_b idx_b 5 NULL 2 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2
+1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 1
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1
# ... and if one adds WITH ROLLUP, then LATERAL DERIVED is no longer used:
explain select t3.a,t3.c,t.max,t.min
diff --git a/mysql-test/main/derived_split_innodb.test b/mysql-test/main/derived_split_innodb.test
index 1ebe27cd12c..86a2b6d73b0 100644
--- a/mysql-test/main/derived_split_innodb.test
+++ b/mysql-test/main/derived_split_innodb.test
@@ -146,7 +146,7 @@ EXPLAIN
SELECT *
FROM
t1 JOIN
- (SELECT t1.a, t1.b FROM t1, t2 WHERE t1.b = t2.c GROUP BY t1.a, t1.b) as dt
+ (SELECT t1_inner.a, t1_inner.b FROM t1 as t1_inner, t2 as t2_inner WHERE t1_inner.b = t2_inner.c GROUP BY t1_inner.a, t1_inner.b) as dt
WHERE
t1.a = dt.a;
diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result
index b86cd1c42cc..aa7519ae750 100644
--- a/mysql-test/main/derived_view.result
+++ b/mysql-test/main/derived_view.result
@@ -214,7 +214,7 @@ explain extended
select * from t1 join (select * from t2 group by f2) tt on f1=f2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 11 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f11` AS `f11`,`tt`.`f2` AS `f2`,`tt`.`f22` AS `f22` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`f2` AS `f2`,`test`.`t2`.`f22` AS `f22` from `test`.`t2` group by `test`.`t2`.`f2`) `tt` where `tt`.`f2` = `test`.`t1`.`f1`
@@ -228,7 +228,7 @@ flush status;
explain select * from t1 join (select * from t2 group by f2) tt on f1=f2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1
2 DERIVED t2 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
show status like 'Handler_read%';
Variable_name Value
@@ -288,7 +288,7 @@ explain showing created indexes
explain extended select * from t1 join v2 on f1=f2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 11 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f11` AS `f11`,`v2`.`f2` AS `f2`,`v2`.`f22` AS `f22` from `test`.`t1` join `test`.`v2` where `v2`.`f2` = `test`.`t1`.`f1`
@@ -339,7 +339,7 @@ flush status;
explain select * from t1 join v2 on f1=f2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1
2 DERIVED t2 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
show status like 'Handler_read%';
Variable_name Value
@@ -372,7 +372,7 @@ Handler_read_rnd_next 36
explain extended select * from v1 join v4 on f1=f2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.f2 2 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.f2 1 100.00
2 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `v1`.`f1` AS `f1`,`v1`.`f11` AS `f11`,`test`.`t2`.`f2` AS `f2`,`test`.`t2`.`f22` AS `f22` from `test`.`v1` join `test`.`t2` where `v1`.`f1` = `test`.`t2`.`f2` and `test`.`t2`.`f2` in (2,3)
@@ -381,12 +381,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.f2 in (2,3) and t2.f2 is not null"
}
@@ -400,11 +403,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["test.t2.f2"],
- "rows": 2,
+ "loops": 11,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -413,7 +419,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 in (2,3)"
}
@@ -485,16 +493,20 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "tt.f1",
"temporary_table": {
@@ -503,12 +515,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tt.f1 > 2",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -517,7 +532,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7 and t1.f1 > 2"
}
@@ -552,7 +569,7 @@ join
on x.f1 = z.f1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived3> ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived5> ref key0 key0 5 tt.f1 2 100.00
+1 PRIMARY <derived5> ref key0 key0 5 tt.f1 1 100.00
5 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
3 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
Warnings:
@@ -566,17 +583,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tt.f1 > 2 and tt.f1 > 2 and tt.f1 is not null",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -585,7 +606,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7 and t1.f1 > 2 and t1.f1 > 2"
}
@@ -606,11 +629,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["tt.f1"],
- "rows": 2,
+ "loops": 11,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 5,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -619,7 +645,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7 and t1.f1 > 2 and t1.f1 > 2"
}
@@ -689,7 +717,7 @@ join
on x.f1 = z.f1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived4> ref key0 key0 5 x.f1 2 100.00
+1 PRIMARY <derived4> ref key0 key0 5 x.f1 1 100.00
4 DERIVED <derived5> ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
5 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
2 DERIVED <derived3> ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
@@ -707,17 +735,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "x.f1 is not null",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "tt.f1",
"temporary_table": {
@@ -726,12 +758,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tt.f1 > 2",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -740,7 +775,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7 and t1.f1 > 2"
}
@@ -768,11 +805,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["x.f1"],
- "rows": 2,
+ "loops": 11,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 4,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "tt.f1",
"temporary_table": {
@@ -781,12 +821,15 @@ EXPLAIN
"table": {
"table_name": "<derived5>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tt.f1 > 2",
"materialized": {
"query_block": {
"select_id": 5,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -795,7 +838,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7 and t1.f1 > 2"
}
@@ -853,17 +898,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.f1 < 7",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -872,7 +921,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7"
}
@@ -930,7 +981,7 @@ join of above two
explain extended select * from v6 join v7 on f2=f1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 11 100.00 Using where
-1 PRIMARY <derived5> ref key0 key0 5 test.t2.f2 2 100.00
+1 PRIMARY <derived5> ref key0 key0 5 test.t2.f2 1 100.00
5 DERIVED t1 ALL NULL NULL NULL NULL 11 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`f2` AS `f2`,`test`.`t2`.`f22` AS `f22`,`v1`.`f1` AS `f1`,`v1`.`f11` AS `f11` from `test`.`t2` join `test`.`v1` where `v1`.`f1` = `test`.`t2`.`f2` and `test`.`t2`.`f2` < 7 and `test`.`t2`.`f2` in (2,3)
@@ -939,12 +990,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.f2 < 7 and t2.f2 in (2,3) and t2.f2 is not null"
}
@@ -958,11 +1012,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["f1"],
"ref": ["test.t2.f2"],
- "rows": 2,
+ "loops": 11,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 5,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.f1",
"temporary_table": {
@@ -971,7 +1028,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.f1 < 7 and t1.f1 in (2,3)"
}
@@ -994,7 +1053,7 @@ test two keys
explain select * from t1 join (select * from t2 group by f2) tt on t1.f1=tt.f2 join t1 xx on tt.f22=xx.f1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 11 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.f1 1
1 PRIMARY xx ALL NULL NULL NULL NULL 11 Using where; Using join buffer (flat, BNL join)
2 DERIVED t2 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
select * from t1 join (select * from t2 group by f2) tt on t1.f1=tt.f2 join t1 xx on tt.f22=xx.f1;
@@ -1019,7 +1078,7 @@ EXPLAIN
SELECT * FROM v1 JOIN t2 ON v1.f1 = t2.f1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 index f1 f1 5 NULL 3 Using where; Using index
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.f1 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.f1 1
2 DERIVED t1 ALL NULL NULL NULL NULL 4
SELECT * FROM v1 JOIN t2 ON v1.f1 = t2.f1;
f1 f1
@@ -1216,7 +1275,7 @@ SELECT * FROM t3
WHERE t3.a IN (SELECT v1.a FROM v1, t2 WHERE t2.a = v1.b);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key1 key1 5 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> ref key1 key1 5 func 1 100.00
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
3 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 Using temporary; Using filesort
Warnings:
@@ -1248,9 +1307,9 @@ SELECT t.f1 AS f
FROM (SELECT DISTINCT t1.* FROM t1,t2 WHERE t2.f2 = t1.f2) t,t3,t4
WHERE t4.f2 = t3.f2 AND t4.f2 = t.f1 ORDER BY f;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 Using where; Using filesort
-1 PRIMARY t4 ref f2 f2 4 t.f1 1 Using index
-1 PRIMARY t3 ref f2 f2 4 t.f1 2 Using index
+1 PRIMARY t4 index f2 f2 9 NULL 2 Using where; Using index; Using temporary; Using filesort
+1 PRIMARY <derived2> ref key1 key1 4 test.t4.f2 1
+1 PRIMARY t3 ref f2 f2 4 test.t4.f2 1 Using index
2 DERIVED t2 system NULL NULL NULL NULL 1 Using temporary
2 DERIVED t1 ref f2 f2 4 const 2 Using where
SELECT t.f1 AS f
@@ -1276,7 +1335,7 @@ EXPLAIN
SELECT * FROM t1 AS t JOIN v1 AS v WHERE t.a = v.b AND t.b = v.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t.a 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t.a 1
2 DERIVED t1 ALL NULL NULL NULL NULL 3
SELECT * FROM t1 AS t JOIN v1 AS v WHERE t.a = v.b AND t.b = v.b;
a b a b
@@ -1582,7 +1641,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SELECT * FROM v2;
a b
@@ -1602,7 +1661,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 10 test.t1.a,test.t1.b 1
3 DERIVED t2 ALL NULL NULL NULL NULL 6
4 UNION t3 ALL NULL NULL NULL NULL 4
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1881,14 +1940,14 @@ WHERE (t2.a ,t1.b) NOT IN (SELECT DISTINCT c,a FROM t3 t);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY t unique_subquery PRIMARY,c PRIMARY 4 func 1 Using where
+2 DEPENDENT SUBQUERY t index_subquery PRIMARY,c c 8 func,func 1 Using index; Using where
EXPLAIN
SELECT * FROM t1 , t2
WHERE (t2.a ,t1.b) NOT IN (SELECT DISTINCT c,a FROM (SELECT * FROM t3) t);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY t3 unique_subquery PRIMARY,c PRIMARY 4 func 1 Using where
+2 DEPENDENT SUBQUERY t3 index_subquery PRIMARY,c c 8 func,func 1 Using index; Using where
SELECT * FROM t1 , t2
WHERE (t2.a ,t1.b) NOT IN (SELECT DISTINCT c,a FROM (SELECT * FROM t3) t);
b a
@@ -1917,7 +1976,7 @@ WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ALL NULL NULL NULL NULL 3 Using where; Start temporary; End temporary
+1 PRIMARY <derived3> ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t3)
3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where
SELECT * FROM t3
WHERE t3.b IN (SELECT v1.b FROM v1, t2
@@ -1931,8 +1990,8 @@ WHERE t3.b IN (SELECT v1.b FROM v1, t2
WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
-1 PRIMARY <derived3> ref key1 key1 8 const,const 0 Start temporary
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY <derived3> ref key1 key1 8 const,const 0 FirstMatch(t3)
3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where
SELECT * FROM t3
WHERE t3.b IN (SELECT v1.b FROM v1, t2
@@ -1955,7 +2014,7 @@ EXPLAIN
SELECT * FROM t1 WHERE t1.b IN (SELECT v2.a FROM v2 WHERE v2.b = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.b,test.t1.a 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.b,test.t1.a 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 2
SELECT * FROM t1 WHERE t1.b IN (SELECT v2.a FROM v2 WHERE v2.b = t1.a);
a b
@@ -2009,7 +2068,7 @@ EXPLAIN
SELECT v1.a FROM v1,v2 WHERE v2.b = v1.b ORDER BY 1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 Using where; Using filesort
-1 PRIMARY <derived3> ref key0 key0 4 v1.b 2
+1 PRIMARY <derived3> ref key0 key0 4 v1.b 1
3 DERIVED t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
2 DERIVED t1 ALL NULL NULL NULL NULL 3 Using temporary; Using filesort
DROP VIEW v1,v2;
@@ -2370,7 +2429,7 @@ GROUP BY TABLE_SCHEMA) AS UNIQUES
ON ( COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY COLUMNS ALL NULL NULL NULL NULL NULL Open_frm_only; Scanned all databases
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ref key0 key0 194 information_schema.COLUMNS.TABLE_SCHEMA 10
2 DERIVED STATISTICS ALL NULL NULL NULL NULL NULL Open_frm_only; Scanned all databases; Using filesort
SELECT COUNT(*) > 0
FROM INFORMATION_SCHEMA.COLUMNS
@@ -2439,10 +2498,14 @@ CREATE TABLE t2 (a int, INDEX(a));
INSERT INTO t2 VALUES (1), (2);
INSERT INTO t1 SELECT a FROM (SELECT a FROM test.t1) AS s1 NATURAL JOIN
t2 AS s2;
+INSERT INTO t1 SELECT a FROM (SELECT a FROM test.t1) AS s1 NATURAL JOIN
+t2 AS s2;
SELECT * FROM t1;
a
1
1
+1
+1
DELETE FROM t1;
INSERT INTO t1 VALUES (1);
PREPARE stmt FROM "
@@ -2596,7 +2659,7 @@ EXPLAIN EXTENDED
SELECT v1.c1, v1.c2 FROM v1, t2 WHERE v1.c1=t2.c1 AND v1.c2=t2.c2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c2 2 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c2 1 100.00 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 5 100.00
Warnings:
Note 1003 /* select#1 */ select `v1`.`c1` AS `c1`,`v1`.`c2` AS `c2` from `test`.`v1` join `test`.`t2` where `v1`.`c1` = `test`.`t2`.`c1` and `v1`.`c2` = `test`.`t2`.`c2`
@@ -2609,7 +2672,7 @@ SELECT t2.c1, t2.c2 FROM (SELECT c1 g, MAX(c2) m FROM t1 GROUP BY c1) t, t2
WHERE t.g=t2.c1 AND t.m=t2.c2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.c2 2 100.00 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c2 1 100.00 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 5 100.00 Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2` from (/* select#2 */ select `test`.`t1`.`c1` AS `g`,max(`test`.`t1`.`c2`) AS `m` from `test`.`t1` group by `test`.`t1`.`c1`) `t` join `test`.`t2` where `t`.`g` = `test`.`t2`.`c1` and `t`.`m` = `test`.`t2`.`c2`
@@ -2940,7 +3003,7 @@ GROUP BY mp.pla_id) d
ON d.matintnum=m2.matintnum;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY m2 ALL NULL NULL NULL NULL 9
-1 PRIMARY <derived2> ref key0 key0 7 test.m2.matintnum 2
+1 PRIMARY <derived2> ref key0 key0 7 test.m2.matintnum 1
2 DERIVED mp ALL NULL NULL NULL NULL 9 Using temporary; Using filesort
2 DERIVED m1 eq_ref PRIMARY PRIMARY 3 test.mp.mat_id 1
prepare stmt1 from
@@ -3074,8 +3137,8 @@ EXPLAIN EXTENDED
SELECT * FROM t1 LEFT JOIN v2 ON t1.id=v2.order_pk;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.id 2 100.00
-2 DERIVED t1 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index; Using filesort
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.id 1 100.00
+2 LATERAL DERIVED t1 eq_ref PRIMARY PRIMARY 4 test.t1.id 1 100.00 Using where; Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`v2`.`order_pk` AS `order_pk` from `test`.`t1` left join `test`.`v2` on(`v2`.`order_pk` = `test`.`t1`.`id`) where 1
SELECT * FROM t1 LEFT JOIN v3 ON t1.id=v3.order_pk;
@@ -3088,8 +3151,8 @@ EXPLAIN EXTENDED
SELECT * FROM t1 LEFT JOIN v3 ON t1.id=v3.order_pk;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.id 2 100.00
-2 DERIVED t1 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index; Using filesort
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.id 1 100.00
+2 LATERAL DERIVED t1 eq_ref PRIMARY PRIMARY 4 test.t1.id 1 100.00 Using where; Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`v3`.`order_pk` AS `order_pk` from `test`.`t1` left join `test`.`v3` on(`v3`.`order_pk` = `test`.`t1`.`id`) where 1
DROP VIEW v1,v2,v3;
diff --git a/mysql-test/main/derived_view.test b/mysql-test/main/derived_view.test
index caccc7dafa1..13444676659 100644
--- a/mysql-test/main/derived_view.test
+++ b/mysql-test/main/derived_view.test
@@ -118,6 +118,7 @@ select * from t1 join v2 on f1=f2;
show status like 'Handler_read%';
explain extended select * from v1 join v4 on f1=f2;
+--source include/explain-no-costs.inc
explain format=json select * from v1 join v4 on f1=f2;
select * from v1 join v4 on f1=f2;
@@ -142,6 +143,7 @@ select * from (select * from
--echo materialized derived in materialized derived
explain extended select * from (select * from
(select * from t1 where f1 < 7 group by f1) tt where f1 > 2 group by f1) zz;
+--source include/explain-no-costs.inc
explain format=json select * from (select * from
(select * from t1 where f1 < 7 group by f1) tt where f1 > 2 group by f1) zz;
select * from (select * from
@@ -153,6 +155,7 @@ explain extended select * from
join
(select * from (select * from t1 where f1 < 7 group by f1) tt where f1 > 2) z
on x.f1 = z.f1;
+--source include/explain-no-costs.inc
explain format=json select * from
(select * from (select * from t1 where f1 < 7 group by f1) tt where f1 > 2) x
join
@@ -194,6 +197,7 @@ join
(select * from
(select * from t1 where f1 < 7 group by f1) tt where f1 > 2 group by f1) z
on x.f1 = z.f1;
+--source include/explain-no-costs.inc
explain format=json select * from
(select * from
(select * from t1 where f1 < 7 group by f1) tt where f1 > 2 group by f1) x
@@ -218,6 +222,7 @@ select * from (select * from v4 group by 1) tt;
--echo materialized view in merged derived
explain extended
select * from ( select * from v1 where f1 < 7) tt;
+--source include/explain-no-costs.inc
explain format=json
select * from ( select * from v1 where f1 < 7) tt;
select * from ( select * from v1 where f1 < 7) tt;
@@ -234,6 +239,7 @@ select * from (select * from v7 group by 1) tt;
--echo join of above two
explain extended select * from v6 join v7 on f2=f1;
+--source include/explain-no-costs.inc
explain format=json select * from v6 join v7 on f2=f1;
select * from v6 join v7 on f2=f1;
@@ -1419,6 +1425,8 @@ INSERT INTO t2 VALUES (1), (2);
INSERT INTO t1 SELECT a FROM (SELECT a FROM test.t1) AS s1 NATURAL JOIN
t2 AS s2;
+INSERT INTO t1 SELECT a FROM (SELECT a FROM test.t1) AS s1 NATURAL JOIN
+t2 AS s2;
SELECT * FROM t1;
DELETE FROM t1;
diff --git a/mysql-test/main/desc_index_range.result b/mysql-test/main/desc_index_range.result
index 6b1f2e31c31..1dc17702a9f 100644
--- a/mysql-test/main/desc_index_range.result
+++ b/mysql-test/main/desc_index_range.result
@@ -194,7 +194,7 @@ test.t1 analyze status OK
# Must use ROR-intersect:
explain select * from t1 where b = 255 AND a IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge a,b b,a 5,5 NULL 1 Using intersect(b,a); Using where; Using index
+1 SIMPLE t1 ref a,b b 5 const 2 Using where
select * from t1 where b = 255 AND a IS NULL;
pk a b
10000 NULL 255
diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result
index 888d3143f2c..02a76cf3e09 100644
--- a/mysql-test/main/distinct.result
+++ b/mysql-test/main/distinct.result
@@ -173,9 +173,9 @@ INSERT INTO t2 values (1),(2),(3);
INSERT INTO t3 VALUES (1,'1'),(2,'2'),(1,'1'),(2,'2');
explain SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index a a 4 NULL 5 Using index; Using temporary
-1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 Using where
-1 SIMPLE t3 ref a a 5 test.t1.b 2 Using index
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using temporary
+1 SIMPLE t3 ref a a 5 test.t1.b 1 Using index
+1 SIMPLE t2 ref a a 4 test.t1.a 1 Using index; Distinct
SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a;
a
1
@@ -522,8 +522,8 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2
WHERE t1_1.a = t1_2.a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1_1 ALL PRIMARY NULL NULL NULL 3 Using temporary
-1 SIMPLE t1_2 eq_ref PRIMARY PRIMARY 4 test.t1_1.a 1 Using index; Distinct
+1 SIMPLE t1_2 index PRIMARY PRIMARY 4 NULL 3 Using index; Using temporary
+1 SIMPLE t1_1 eq_ref PRIMARY PRIMARY 4 test.t1_2.a 1
EXPLAIN SELECT a FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 3 Using index
@@ -538,10 +538,10 @@ PRIMARY KEY (a,b));
INSERT INTO t2 VALUES (1,1,1,50), (1,2,3,40), (2,1,3,4);
EXPLAIN SELECT DISTINCT a FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL PRIMARY 4 NULL 4 Using index for group-by
+1 SIMPLE t2 range NULL PRIMARY 4 NULL 3 Using index for group-by
EXPLAIN SELECT DISTINCT a,a FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL PRIMARY 4 NULL 4 Using index for group-by
+1 SIMPLE t2 range NULL PRIMARY 4 NULL 3 Using index for group-by
EXPLAIN SELECT DISTINCT b,a FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index
@@ -754,9 +754,6 @@ INSERT INTO t1(a, b, c) VALUES (1, 1, 1),
(1, 2, 1),
(1, 2, 2),
(1, 2, 3);
-EXPLAIN SELECT DISTINCT a, b, d, c FROM t1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL PRIMARY 16 NULL 7 Using index for group-by; Using temporary
SELECT DISTINCT a, b, d, c FROM t1;
a b d c
1 1 0 1
@@ -765,6 +762,13 @@ a b d c
1 2 0 1
1 2 0 2
1 2 0 3
+EXPLAIN SELECT DISTINCT a, b, d, c FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL a 16 NULL 6 Using index
+INSERT INTO t1 SELECT seq/10,seq/10,seq/10,seq/10,seq from seq_1_to_100;
+EXPLAIN SELECT DISTINCT a, b, d, c FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL PRIMARY 16 NULL 10 Using index for group-by; Using temporary
DROP TABLE t1;
#
# Bug #46159: simple query that never returns
diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test
index 32e189da98a..2f10d866560 100644
--- a/mysql-test/main/distinct.test
+++ b/mysql-test/main/distinct.test
@@ -4,6 +4,7 @@
#
--source include/default_optimizer_switch.inc
+--source include/have_sequence.inc
--disable_warnings
drop table if exists t1,t2,t3;
--enable_warnings
@@ -574,9 +575,10 @@ INSERT INTO t1(a, b, c) VALUES (1, 1, 1),
(1, 2, 2),
(1, 2, 3);
-EXPLAIN SELECT DISTINCT a, b, d, c FROM t1;
-
SELECT DISTINCT a, b, d, c FROM t1;
+EXPLAIN SELECT DISTINCT a, b, d, c FROM t1;
+INSERT INTO t1 SELECT seq/10,seq/10,seq/10,seq/10,seq from seq_1_to_100;
+EXPLAIN SELECT DISTINCT a, b, d, c FROM t1;
DROP TABLE t1;
diff --git a/mysql-test/main/events_bugs.result b/mysql-test/main/events_bugs.result
index e3984bcd67a..0615dac53b1 100644
--- a/mysql-test/main/events_bugs.result
+++ b/mysql-test/main/events_bugs.result
@@ -405,7 +405,7 @@ SELECT event_name, definer FROM INFORMATION_SCHEMA.EVENTS;
event_name definer
e1 mysqltest_u1@localhost
ALTER DEFINER=root@localhost EVENT e1 ON SCHEDULE EVERY 1 HOUR;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
SELECT event_name, definer FROM INFORMATION_SCHEMA.EVENTS;
event_name definer
e1 mysqltest_u1@localhost
@@ -418,7 +418,7 @@ event_name definer
e1 mysqltest_u1@localhost
DROP EVENT e1;
CREATE DEFINER=root@localhost EVENT e1 ON SCHEDULE EVERY 1 DAY DO SELECT 1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
DROP EVENT e1;
ERROR HY000: Unknown event 'e1'
disconnect conn1;
diff --git a/mysql-test/main/except.result b/mysql-test/main/except.result
index d83623370d5..ec7d085c70f 100644
--- a/mysql-test/main/except.result
+++ b/mysql-test/main/except.result
@@ -37,12 +37,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -53,12 +56,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -85,6 +91,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -92,9 +99,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -108,6 +117,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -115,9 +125,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -139,6 +151,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -146,9 +159,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -164,6 +179,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -171,9 +187,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -187,6 +205,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -194,9 +213,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -281,12 +302,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "REPLACED",
"filtered": 100
}
},
@@ -295,7 +319,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 2,
"rows": 2,
+ "cost": "REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -310,12 +336,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "REPLACED",
"filtered": 100
}
},
@@ -324,7 +353,9 @@ EXPLAIN
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 2,
"rows": 2,
+ "cost": "REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -355,6 +386,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -362,9 +394,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -376,9 +410,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -387,7 +423,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -397,6 +434,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -404,9 +442,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -418,9 +458,11 @@ ANALYZE
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -429,7 +471,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -448,6 +491,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -455,9 +499,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -473,6 +519,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -480,9 +527,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -494,9 +543,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -505,7 +556,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -515,6 +567,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -522,9 +575,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -536,9 +591,11 @@ ANALYZE
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -547,7 +604,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
diff --git a/mysql-test/main/except.test b/mysql-test/main/except.test
index 090826ce94d..d253b288d3b 100644
--- a/mysql-test/main/except.test
+++ b/mysql-test/main/except.test
@@ -11,6 +11,7 @@ insert into t2 values (2,2),(3,3);
EXPLAIN (select a,b from t1) except (select c,d from t2);
EXPLAIN extended (select a,b from t1) except (select c,d from t2);
EXPLAIN extended select * from ((select a,b from t1) except (select c,d from t2)) a;
+--source include/analyze-format.inc
EXPLAIN format=json (select a,b from t1) except (select c,d from t2);
--source include/analyze-format.inc
@@ -43,6 +44,7 @@ insert into t4 values (4,4),(7,7);
EXPLAIN (select a,b,e,f from t1,t3) except (select c,d,g,h from t2,t4);
EXPLAIN (select a,b,e,f from t1,t3) except (select c,d,g,h from t2,t4);
EXPLAIN extended select * from ((select a,b,e,f from t1,t3) except (select c,d,g,h from t2,t4)) a;
+--source include/analyze-format.inc
EXPLAIN format=json (select a,b,e,f from t1,t3) except (select c,d,g,h from t2,t4);
--source include/analyze-format.inc
diff --git a/mysql-test/main/except_all.result b/mysql-test/main/except_all.result
index df19abda077..f79f35ee932 100644
--- a/mysql-test/main/except_all.result
+++ b/mysql-test/main/except_all.result
@@ -65,12 +65,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -81,12 +84,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -120,6 +126,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -127,9 +134,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -145,6 +154,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -152,9 +162,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -168,6 +180,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -175,9 +188,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -204,6 +219,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -211,9 +227,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -229,6 +247,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -236,9 +255,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -252,6 +273,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -259,9 +281,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -387,12 +411,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -403,12 +430,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -417,7 +447,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 3,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -432,12 +464,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -446,7 +481,9 @@ EXPLAIN
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 2,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -482,6 +519,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -489,9 +527,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -503,9 +543,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 3,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -514,7 +556,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -524,6 +567,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -531,9 +575,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -545,9 +591,11 @@ ANALYZE
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -556,7 +604,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -574,6 +623,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -581,9 +631,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 9,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -599,6 +651,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -606,9 +659,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -620,9 +675,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 3,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -631,7 +688,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -641,6 +699,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -648,9 +707,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -662,9 +723,11 @@ ANALYZE
"table": {
"table_name": "t4",
"access_type": "ALL",
+ "loops": 2,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -673,7 +736,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "119",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
diff --git a/mysql-test/main/except_all.test b/mysql-test/main/except_all.test
index f873b220126..9f788f52dea 100644
--- a/mysql-test/main/except_all.test
+++ b/mysql-test/main/except_all.test
@@ -16,6 +16,7 @@ select * from t1 except all select * from t1 union all select * from t1 union al
select * from (select * from t1 except all select * from t2) q1 except all select * from (select * from t1 except all select * from t2) q2;
EXPLAIN select * from t1 except all select * from t2;
+--source include/explain-no-costs.inc
EXPLAIN format=json select * from t1 except all select * from t2;
EXPLAIN extended (select * from t1) except all (select * from t2);
EXPLAIN extended select * from ((select * from t1) except all (select * from t2)) a;
@@ -53,6 +54,7 @@ select * from ((select a,b,e,f from t1,t3) except all (select c,d,g,h from t2,t4
EXPLAIN (select a,b,e,f from t1,t3) except all (select c,d,g,h from t2,t4);
EXPLAIN select * from ((select a,b,e,f from t1,t3) except all (select c,d,g,h from t2,t4)) t;
EXPLAIN extended select * from ((select a,b,e,f from t1,t3) except all (select c,d,g,h from t2,t4)) t;
+--source include/explain-no-costs.inc
EXPLAIN format=json select * from ((select a,b,e,f from t1,t3) except all (select c,d,g,h from t2,t4)) t;
--source include/analyze-format.inc
@@ -96,4 +98,4 @@ INSERT INTO t VALUES (1),(2);
SELECT * FROM t WHERE i != ANY ( SELECT 3 EXCEPT ALL SELECT 3 );
-drop table t;
\ No newline at end of file
+drop table t;
diff --git a/mysql-test/main/explain.result b/mysql-test/main/explain.result
index 8db5e9f51ac..1e546d42d0a 100644
--- a/mysql-test/main/explain.result
+++ b/mysql-test/main/explain.result
@@ -325,7 +325,7 @@ DROP TABLE t1;
# Bug#56814 Explain + subselect + fulltext crashes server
#
CREATE TABLE t1(f1 VARCHAR(6) NOT NULL,
-FULLTEXT KEY(f1),UNIQUE(f1));
+FULLTEXT KEY `fulltext` (f1), UNIQUE `unique` (f1));
INSERT INTO t1 VALUES ('test');
EXPLAIN SELECT 1 FROM t1
WHERE 1 > ALL((SELECT t1.f1 FROM t1 JOIN t1 a ON (MATCH(t1.f1) AGAINST (""))
@@ -333,7 +333,7 @@ WHERE t1.f1 GROUP BY t1.f1));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
2 SUBQUERY a system NULL NULL NULL NULL 1
-2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
+2 SUBQUERY t1 fulltext unique,fulltext fulltext 0 1 Using where
PREPARE stmt FROM
'EXPLAIN SELECT 1 FROM t1
WHERE 1 > ALL((SELECT t1.f1 FROM t1 RIGHT OUTER JOIN t1 a
@@ -343,12 +343,12 @@ EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
2 SUBQUERY a system NULL NULL NULL NULL 1
-2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
+2 SUBQUERY t1 fulltext unique,fulltext fulltext 0 1 Using where
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
2 SUBQUERY a system NULL NULL NULL NULL 1
-2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
+2 SUBQUERY t1 fulltext unique,fulltext fulltext 0 1 Using where
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM
'EXPLAIN SELECT 1 FROM t1
@@ -359,12 +359,13 @@ EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
2 SUBQUERY a system NULL NULL NULL NULL 1
-2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
+2 SUBQUERY t1 fulltext unique,fulltext fulltext 0 1 Using where
+INSERT into t1 values('test1'),('test2'),('test3'),('test4'),('test5');
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 system NULL NULL NULL NULL 1
-2 SUBQUERY a system NULL NULL NULL NULL 1
-2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
+1 PRIMARY t1 index NULL unique 8 NULL 6 Using index
+2 SUBQUERY t1 fulltext unique,fulltext fulltext 0 1 Using where
+2 SUBQUERY a index NULL unique 8 NULL 6 Using index
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
End of 5.1 tests.
diff --git a/mysql-test/main/explain.test b/mysql-test/main/explain.test
index 36595ba727c..0e4a3b8c2c0 100644
--- a/mysql-test/main/explain.test
+++ b/mysql-test/main/explain.test
@@ -254,7 +254,7 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1(f1 VARCHAR(6) NOT NULL,
-FULLTEXT KEY(f1),UNIQUE(f1));
+FULLTEXT KEY `fulltext` (f1), UNIQUE `unique` (f1));
INSERT INTO t1 VALUES ('test');
EXPLAIN SELECT 1 FROM t1
@@ -279,6 +279,7 @@ PREPARE stmt FROM
WHERE t1.f1 GROUP BY t1.f1))';
EXECUTE stmt;
+INSERT into t1 values('test1'),('test2'),('test3'),('test4'),('test5');
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
diff --git a/mysql-test/main/explain_innodb.result b/mysql-test/main/explain_innodb.result
index b46665c279c..0bdd5a44985 100644
--- a/mysql-test/main/explain_innodb.result
+++ b/mysql-test/main/explain_innodb.result
@@ -15,6 +15,6 @@ explain
SELECT * FROM (SELECT id FROM t1 GROUP BY id) dt WHERE 1=0;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 DERIVED t1 range NULL id 53 NULL 2 Using index for group-by
+2 DERIVED t1 range NULL id 53 NULL 1 Using index for group-by
SET GLOBAL slow_query_log = @sql_tmp;
drop table t1;
diff --git a/mysql-test/main/explain_json.result b/mysql-test/main/explain_json.result
index 3c3c0688ab8..df443003a86 100644
--- a/mysql-test/main/explain_json.result
+++ b/mysql-test/main/explain_json.result
@@ -6,12 +6,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -33,12 +36,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a < 3"
}
@@ -59,12 +65,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a is not null"
}
@@ -78,7 +87,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t0.a"],
+ "loops": 10,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -93,6 +104,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -102,7 +114,9 @@ EXPLAIN
"key": "a1",
"key_length": "5",
"used_key_parts": ["a1"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t2.a1 < 5"
}
@@ -115,6 +129,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -138,7 +153,9 @@ EXPLAIN
}
]
},
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a1 = 1 or t2.b1 = 2"
}
@@ -151,6 +168,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -174,7 +192,9 @@ EXPLAIN
}
]
},
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a1 = 1 or t2.b1 = 2 and t2.b2 = 3"
}
@@ -188,6 +208,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -211,7 +232,9 @@ EXPLAIN
}
]
},
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a1 = 1 and t2.a2 = 1 or t2.b1 = 2 and t2.b2 = 1"
}
@@ -225,12 +248,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a is not null"
}
@@ -244,7 +270,9 @@ EXPLAIN
"key_length": "10",
"used_key_parts": ["b1", "b2"],
"ref": ["test.t0.a", "const"],
+ "loops": 10,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -266,12 +294,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "A",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -282,12 +313,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "B",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -307,12 +341,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "A",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -323,12 +360,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "B",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -349,12 +389,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -365,12 +408,15 @@ EXPLAIN
"state": "uninitialized",
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t0.a"
}
@@ -389,12 +435,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a > (subquery#2) or t0.a < 3"
}
@@ -406,12 +455,15 @@ EXPLAIN
"state": "uninitialized",
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t0.a"
}
@@ -435,12 +487,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tbl1",
"access_type": "ALL",
+ "loops": 1,
"rows": 100,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tbl1.b < 3"
}
@@ -450,7 +505,9 @@ EXPLAIN
"table": {
"table_name": "tbl2",
"access_type": "ALL",
+ "loops": 100,
"rows": 100,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tbl2.b < 5"
},
@@ -538,12 +595,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -566,17 +626,21 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tbl.cnt > 0",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "cnt > 0",
"filesort": {
"sort_key": "t1.a",
@@ -586,7 +650,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -607,12 +673,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tbl2",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tbl2.a is not null"
}
@@ -626,12 +695,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["cnt"],
"ref": ["test.tbl2.a"],
- "rows": 2,
+ "loops": 10,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tbl1.cnt = tbl2.a",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -640,7 +712,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -663,12 +737,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a is not null"
}
@@ -682,19 +759,24 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["max(a)"],
"ref": ["test.t1.a"],
+ "loops": 10,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -713,18 +795,20 @@ EXPLAIN
create table t2 like t1;
insert into t2 select * from t1;
explain format=json
-select * from t1,t2 where t1.a in ( select a from t0);
+select * from t1,t2 where t1.a in ( select seq+0 from seq_1_to_100);
EXPLAIN
{
"query_block": {
"select_id": 1,
- "const_condition": "1",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -734,11 +818,12 @@ EXPLAIN
"access_type": "eq_ref",
"possible_keys": ["distinct_key"],
"key": "distinct_key",
- "key_length": "4",
- "used_key_parts": ["a"],
+ "key_length": "8",
+ "used_key_parts": ["seq+0"],
"ref": ["func"],
"rows": 1,
"filtered": 100,
+ "attached_condition": "t1.a = seq_1_to_100.seq + 0",
"materialized": {
"unique": 1,
"query_block": {
@@ -746,10 +831,16 @@ EXPLAIN
"nested_loop": [
{
"table": {
- "table_name": "t0",
- "access_type": "ALL",
- "rows": 10,
- "filtered": 100
+ "table_name": "seq_1_to_100",
+ "access_type": "index",
+ "key": "PRIMARY",
+ "key_length": "8",
+ "used_key_parts": ["seq"],
+ "loops": 1,
+ "rows": 100,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "using_index": true
}
}
]
@@ -762,11 +853,13 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 10,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "1Kb",
+ "buffer_size": "19Kb",
"join_type": "BNL"
}
}
@@ -787,12 +880,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -801,8 +897,10 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 10,
"rows": 10,
- "filtered": 100,
+ "cost": "COST_REPLACED",
+ "filtered": 10,
"first_match": "t2"
},
"buffer_type": "flat",
@@ -830,12 +928,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -846,8 +947,10 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 10,
"rows": 10,
- "filtered": 100
+ "cost": "COST_REPLACED",
+ "filtered": 10
},
"buffer_type": "flat",
"buffer_size": "206",
@@ -874,6 +977,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -883,7 +987,9 @@ EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a < 3",
"mrr_type": "Rowid-ordered scan"
@@ -900,12 +1006,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tbl1",
"access_type": "ALL",
+ "loops": 1,
"rows": 100,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -916,7 +1025,9 @@ EXPLAIN
"table_name": "tbl2",
"access_type": "ALL",
"possible_keys": ["a"],
+ "loops": 100,
"rows": 100,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -930,14 +1041,15 @@ drop table t0;
# MDEV-7265: "Full scan on NULL key", the join case
#
CREATE TABLE t1 (a INT, KEY(a));
-INSERT INTO t1 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(5),(6),(7);
CREATE TABLE t2 (b INT);
-INSERT INTO t2 VALUES (3),(4);
+INSERT INTO t2 VALUES (3),(4),(9),(10),(11);
EXPLAIN FORMAT=JSON SELECT * FROM t1 AS outer_t1 WHERE a <> ALL ( SELECT a FROM t1, t2 WHERE b <> outer_t1.a );
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -946,7 +1058,9 @@ EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 2,
+ "loops": 1,
+ "rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "!<in_optimizer>(outer_t1.a,<exists>(subquery#2))",
"using_index": true
@@ -957,6 +1071,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "trigcond(t1.a is null)",
"nested_loop": [
{
@@ -969,7 +1084,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["func"],
- "rows": 2,
+ "loops": 1,
+ "rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(<cache>(outer_t1.a) = t1.a or t1.a is null)",
"using_index": true
@@ -981,7 +1098,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
- "rows": 2,
+ "loops": 3,
+ "rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -1013,13 +1132,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "<not>(<in_optimizer>(20000,<max>(subquery#2) >= 20000))",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1028,12 +1150,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tbl1",
"access_type": "ALL",
+ "loops": 1,
"rows": 100,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1042,7 +1167,9 @@ EXPLAIN
"table": {
"table_name": "tbl2",
"access_type": "ALL",
+ "loops": 100,
"rows": 100,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -1071,14 +1198,17 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
- "attached_condition": "!<in_optimizer>(t1.a,t1.a in (subquery#2))"
+ "attached_condition": "!<in_optimizer>(t1.a,<exists>(subquery#2))"
}
}
],
@@ -1086,13 +1216,18 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
+ "having_condition": "trigcond(t2.b is null)",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
- "filtered": 100
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "attached_condition": "trigcond(<cache>(t1.a) = t2.b or t2.b is null)"
}
}
]
@@ -1129,6 +1264,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1137,7 +1273,9 @@ EXPLAIN
"key": "a",
"key_length": "10",
"used_key_parts": ["a", "b"],
+ "loops": 1,
"rows": 101,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index_for_group_by": true
}
@@ -1153,6 +1291,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1163,9 +1302,11 @@ ANALYZE
"key": "a",
"key_length": "10",
"used_key_parts": ["a", "b"],
+ "loops": 1,
"r_loops": 1,
"rows": 101,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1223,26 +1364,35 @@ analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+select count(*) from t1;
+count(*)
+128
explain select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL idx_t1_1 147 NULL 17 Using where; Using index for group-by
+1 SIMPLE t1 range NULL idx_t1_2 147 NULL 17 Using where; Using index for group-by
explain select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL idx_t1_1 163 NULL 65 Using where; Using index for group-by
+1 SIMPLE t1 index NULL idx_t1_1 163 NULL 128 Using where; Using index
+explain select count(distinct a1,a2,b) from t1 where a1 >= "" and (a2 >= 'b') and (b = 'a');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 1 Using where; Using index for group-by
explain format=json select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "range",
- "key": "idx_t1_1",
+ "key": "idx_t1_2",
"key_length": "147",
"used_key_parts": ["a1", "a2", "b"],
+ "loops": 1,
"rows": 17,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 'a' and t1.a2 >= 'b'",
"using_index_for_group_by": true
@@ -1256,17 +1406,46 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
- "access_type": "range",
+ "access_type": "index",
"key": "idx_t1_1",
"key_length": "163",
"used_key_parts": ["a1", "a2", "b", "c"],
- "rows": 65,
- "filtered": 100,
+ "loops": 1,
+ "rows": 128,
+ "cost": "COST_REPLACED",
+ "filtered": 0.198364258,
"attached_condition": "t1.b = 'a' and t1.c = 'i121' and t1.a2 >= 'b'",
+ "using_index": true
+ }
+ }
+ ]
+ }
+}
+explain format=json select count(distinct a1,a2,b) from t1 where a1 >= "" and (a2 >= 'b') and (b = 'a');
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["idx_t1_0", "idx_t1_1", "idx_t1_2"],
+ "key": "idx_t1_1",
+ "key_length": "147",
+ "used_key_parts": ["a1", "a2", "b"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "attached_condition": "t1.b = 'a' and t1.a1 >= '' and t1.a2 >= 'b'",
"using_index_for_group_by": true
}
}
@@ -1284,12 +1463,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = _latin1'\xDF'"
}
@@ -1308,12 +1490,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(case when convert(t1.a using utf8mb3) = <cache>(_utf8mb3'a' collate utf8mb3_bin) then NULL else t1.a end)"
}
@@ -1341,6 +1526,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "TOP > t2.a",
"filesort": {
"sort_key": "t2.a",
@@ -1350,7 +1536,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 256,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1365,6 +1553,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t2.a",
"temporary_table": {
@@ -1373,7 +1562,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 256,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1399,6 +1590,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t2.a",
"temporary_table": {
@@ -1407,7 +1599,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 256,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1468,7 +1662,7 @@ insert into t2 values (1),(2);
explain
select * from t1 left join t2 on t2.pk > 10 and t2.pk < 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 const PRIMARY NULL NULL NULL 1 Impossible ON condition
+1 SIMPLE t2 const PRIMARY NULL NULL NULL 0 Impossible ON condition
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
explain format=json
select * from t1 left join t2 on t2.pk > 10 and t2.pk < 0;
@@ -1476,6 +1670,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
@@ -1483,8 +1678,8 @@ EXPLAIN
"table_name": "t2",
"access_type": "const",
"possible_keys": ["PRIMARY"],
- "rows": 1,
- "filtered": 100,
+ "rows": 0,
+ "filtered": 0,
"impossible_on_condition": true
}
},
@@ -1492,7 +1687,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1508,6 +1705,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"const_condition": "1",
@@ -1518,9 +1716,9 @@ ANALYZE
"access_type": "const",
"possible_keys": ["PRIMARY"],
"r_loops": 0,
- "rows": 1,
+ "rows": 0,
"r_rows": null,
- "filtered": 100,
+ "filtered": 0,
"r_filtered": null,
"impossible_on_condition": true
}
@@ -1529,9 +1727,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1553,12 +1753,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1571,7 +1774,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk"],
"ref": ["test.t1.a"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(t2.pk is null) and trigcond(trigcond(t1.a is not null))",
"using_index": true,
@@ -1590,6 +1795,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1597,9 +1803,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1615,9 +1823,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["pk"],
"ref": ["test.t1.a"],
+ "loops": 2,
"r_loops": 2,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1642,13 +1852,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a is not null"
}
@@ -1662,7 +1875,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk"],
"ref": ["test.t1.a"],
+ "loops": 2,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index": true,
"distinct": true
@@ -1681,6 +1896,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"temporary_table": {
@@ -1689,9 +1905,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1708,9 +1926,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["pk"],
"ref": ["test.t1.a"],
+ "loops": 2,
"r_loops": 2,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1748,12 +1968,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.a is not null"
}
@@ -1768,7 +1991,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.a"],
+ "loops": 10,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition_bka": "t4.b + 1 <= t3.b + 1"
},
@@ -1790,6 +2015,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1797,9 +2023,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1817,9 +2045,11 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.a"],
+ "loops": 10,
"r_loops": 1,
"rows": 1,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1830,7 +2060,8 @@ ANALYZE
"buffer_size": "400",
"join_type": "BKA",
"mrr_type": "Rowid-ordered scan",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -1852,12 +2083,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1868,13 +2102,16 @@ EXPLAIN
"state": "uninitialized",
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"outer_ref_condition": "t0.a < 5",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b < t0.a"
}
@@ -1897,6 +2134,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -1905,7 +2143,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1919,6 +2159,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -1927,7 +2168,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1941,6 +2184,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -1949,7 +2193,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1971,12 +2217,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -1987,7 +2236,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 2,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -2001,8 +2252,10 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 4,
"rows": 2,
- "filtered": 100
+ "cost": "COST_REPLACED",
+ "filtered": 25
},
"buffer_type": "incremental",
"buffer_size": "109",
diff --git a/mysql-test/main/explain_json.test b/mysql-test/main/explain_json.test
index 17e2da4754c..6403fdefdd7 100644
--- a/mysql-test/main/explain_json.test
+++ b/mysql-test/main/explain_json.test
@@ -2,6 +2,7 @@
# EXPLAIN FORMAT=JSON tests. These are tests developed for MariaDB.
#
--source include/default_optimizer_switch.inc
+--source include/have_sequence.inc
--disable_warnings
drop table if exists t0,t1,t2;
@@ -10,10 +11,13 @@ drop table if exists t0,t1,t2;
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+--source include/explain-no-costs.inc
explain format=json select * from t0;
+--source include/explain-no-costs.inc
explain format=json select * from t0 where 1>2;
+--source include/explain-no-costs.inc
explain format=json select * from t0 where a<3;
--echo # Try a basic join
@@ -25,22 +29,28 @@ select
'filler'
from t0 a, t0 b, t0 c;
+--source include/explain-no-costs.inc
explain format=json select * from t0,t1 where t1.a=t0.a;
--echo # Try range and index_merge
create table t2 (a1 int, a2 int, b1 int, b2 int, key(a1,a2), key(b1,b2));
insert into t2 select a,a,a,a from t1;
+--source include/explain-no-costs.inc
explain format=json select * from t2 where a1<5;
+--source include/explain-no-costs.inc
explain format=json select * from t2 where a1=1 or b1=2;
+--source include/explain-no-costs.inc
explain format=json select * from t2 where a1=1 or (b1=2 and b2=3);
+--source include/explain-no-costs.inc
explain format=json select * from t2 where (a1=1 and a2=1) or
(b1=2 and b2=1);
--echo # Try ref access on two key components
+--source include/explain-no-costs.inc
explain format=json select * from t0,t2 where t2.b1=t0.a and t2.b2=4;
drop table t1,t2;
@@ -48,7 +58,9 @@ drop table t1,t2;
--echo #
--echo # Try a UNION
--echo #
+--source include/explain-no-costs.inc
explain format=json select * from t0 A union select * from t0 B;
+--source include/explain-no-costs.inc
explain format=json select * from t0 A union all select * from t0 B;
--echo #
@@ -56,8 +68,10 @@ explain format=json select * from t0 A union all select * from t0 B;
--echo #
create table t1 (a int, b int);
insert into t1 select a,a from t0;
+--source include/explain-no-costs.inc
explain format=json select a, a > (select max(b) from t1 where t1.b=t0.a) from t0;
+--source include/explain-no-costs.inc
explain format=json
select * from t0 where
a > (select max(b) from t1 where t1.b=t0.a) or a < 3 ;
@@ -70,6 +84,7 @@ drop table t1;
create table t1 (a int, b int);
insert into t1 select tbl1.a+10*tbl2.a, tbl1.a+10*tbl2.a from t0 tbl1, t0 tbl2;
+--source include/explain-no-costs.inc
explain format=json
select * from t1 tbl1, t1 tbl2 where tbl1.a=tbl2.a and tbl1.b < 3 and tbl2.b < 5;
@@ -78,16 +93,22 @@ drop table t1;
--echo #
--echo # Single-table UPDATE/DELETE, INSERT
--echo #
+--source include/explain-no-costs.inc
explain format=json delete from t0;
+--source include/explain-no-costs.inc
explain format=json delete from t0 where 1 > 2;
+--source include/explain-no-costs.inc
explain format=json delete from t0 where a < 3;
+--source include/explain-no-costs.inc
explain format=json update t0 set a=3 where a in (2,3,4);
+--source include/explain-no-costs.inc
explain format=json insert into t0 values (1);
create table t1 like t0;
+--source include/explain-no-costs.inc
explain format=json insert into t1 values ((select max(a) from t0));
drop table t1;
@@ -97,10 +118,12 @@ drop table t1;
--echo #
create table t1 (a int, b int);
insert into t1 select a,a from t0;
+--source include/explain-no-costs.inc
explain format=json
select * from (select a, count(*) as cnt from t1 group by a) as tbl
where cnt>0;
+--source include/explain-no-costs.inc
explain format=json
select * from (select a, count(*) as cnt from t1 group by a) as tbl1, t1 as
tbl2 where cnt=tbl2.a;
@@ -108,6 +131,7 @@ tbl2 where cnt=tbl2.a;
--echo #
--echo # Non-merged semi-join (aka JTBM)
--echo #
+--source include/explain-no-costs.inc
explain format=json
select * from t1 where a in (select max(a) from t1 group by b);
@@ -116,14 +140,16 @@ select * from t1 where a in (select max(a) from t1 group by b);
--echo #
create table t2 like t1;
insert into t2 select * from t1;
+--source include/explain-no-costs.inc
explain format=json
-select * from t1,t2 where t1.a in ( select a from t0);
+select * from t1,t2 where t1.a in ( select seq+0 from seq_1_to_100);
--echo #
--echo # First-Match
--echo #
explain
select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+--source include/explain-no-costs.inc
explain format=json
select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
@@ -134,6 +160,7 @@ set @tmp= @@optimizer_switch;
set optimizer_switch='firstmatch=off';
explain
select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
+--source include/explain-no-costs.inc
explain format=json
select * from t2 where t2.a in ( select a from t1 where t1.b=t2.b);
set optimizer_switch=@tmp;
@@ -148,10 +175,12 @@ insert into t1 select tbl1.a+10*tbl2.a, 12345 from t0 tbl1, t0 tbl2;
set @tmp= @@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on';
+--source include/explain-no-costs.inc
explain format=json select * from t1 where a < 3;
--echo # 'Range checked for each record'
set optimizer_switch=@tmp;
+--source include/explain-no-costs.inc
explain format=json
select * from t1 tbl1, t1 tbl2 where tbl2.a < tbl1.b;
@@ -163,11 +192,12 @@ drop table t0;
--echo #
CREATE TABLE t1 (a INT, KEY(a));
-INSERT INTO t1 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(5),(6),(7);
CREATE TABLE t2 (b INT);
-INSERT INTO t2 VALUES (3),(4);
+INSERT INTO t2 VALUES (3),(4),(9),(10),(11);
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON SELECT * FROM t1 AS outer_t1 WHERE a <> ALL ( SELECT a FROM t1, t2 WHERE b <> outer_t1.a );
DROP TABLE t1,t2;
@@ -181,6 +211,7 @@ insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1(a int, b int);
insert into t1 select tbl1.a+10*tbl2.a, 1234 from t0 tbl1, t0 tbl2;
+--source include/explain-no-costs.inc
explain format=json
select * from t0
where
@@ -199,6 +230,7 @@ INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (b INT);
INSERT INTO t2 VALUES (3),(4);
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE a <> ALL ( SELECT b FROM t2 );
DROP TABLE t1, t2;
@@ -220,6 +252,7 @@ insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, c int, d int, key(a,b,c));
insert into t1 select A.a, B.a, C.a, D.a from t2 A, t2 B, t2 C, t2 D;
explain select count(distinct b) from t1 group by a;
+--source include/explain-no-costs.inc
explain format=json select count(distinct b) from t1 group by a;
--source include/analyze-format.inc
analyze format=json select count(distinct b) from t1 group by a;
@@ -271,13 +304,20 @@ create index idx_t1_0 on t1 (a1);
create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
+select count(*) from t1;
explain select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
explain select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
+explain select count(distinct a1,a2,b) from t1 where a1 >= "" and (a2 >= 'b') and (b = 'a');
+--source include/explain-no-costs.inc
explain format=json select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
+
+--source include/explain-no-costs.inc
explain format=json select count(distinct a1,a2,b,c) from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
+--source include/explain-no-costs.inc
+explain format=json select count(distinct a1,a2,b) from t1 where a1 >= "" and (a2 >= 'b') and (b = 'a');
drop table t1;
--echo #
@@ -285,6 +325,7 @@ drop table t1;
--echo #
CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1);
INSERT INTO t1 VALUES ('a'),('b');
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE a=_latin1 0xDF;
DROP TABLE t1;
@@ -293,6 +334,7 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1);
INSERT INTO t1 VALUES ('a'),('A');
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE NULLIF(a,_utf8'a' COLLATE utf8_bin);
DROP TABLE t1;
@@ -310,12 +352,16 @@ create table t2 (
);
insert into t2 select A.a*1000 + B.a, A.a*1000 + B.a from t0 A, t1 B;
--echo # normal HAVING
+--source include/explain-no-costs.inc
explain format=json select a, max(b) as TOP from t2 group by a having TOP > a;
--echo # HAVING is always TRUE (not printed)
+--source include/explain-no-costs.inc
explain format=json select a, max(b) as TOP from t2 group by a having 1<>2;
--echo # HAVING is always FALSE (intercepted by message)
+--source include/explain-no-costs.inc
explain format=json select a, max(b) as TOP from t2 group by a having 1=2;
--echo # HAVING is absent
+--source include/explain-no-costs.inc
explain format=json select a, max(b) as TOP from t2 group by a;
drop table t0, t1, t2;
@@ -327,6 +373,7 @@ drop table t0, t1, t2;
create table t1 (i int) engine=myisam;
explain
select * from t1;
+--source include/explain-no-costs.inc
explain format=json
select * from t1;
--source include/analyze-format.inc
@@ -343,6 +390,7 @@ insert into t2 values (1),(2);
explain
select * from t1 left join t2 on t2.pk > 10 and t2.pk < 0;
+--source include/explain-no-costs.inc
explain format=json
select * from t1 left join t2 on t2.pk > 10 and t2.pk < 0;
--source include/analyze-format.inc
@@ -352,6 +400,7 @@ select * from t1 left join t2 on t2.pk > 10 and t2.pk < 0;
--echo # Check ET_NOT_EXISTS:
explain
select * from t1 left join t2 on t2.pk=t1.a where t2.pk is null;
+--source include/explain-no-costs.inc
explain format=json
select * from t1 left join t2 on t2.pk=t1.a where t2.pk is null;
--source include/analyze-format.inc
@@ -361,6 +410,7 @@ select * from t1 left join t2 on t2.pk=t1.a where t2.pk is null;
--echo # Check ET_DISTINCT
explain
select distinct t1.a from t1 join t2 on t2.pk=t1.a;
+--source include/explain-no-costs.inc
explain format=json
select distinct t1.a from t1 join t2 on t2.pk=t1.a;
--source include/analyze-format.inc
@@ -387,6 +437,7 @@ set optimizer_switch='mrr=on';
set join_cache_level=6;
explain
select * from t3,t4 where t3.a=t4.a and (t4.b+1 <= t3.b+1);
+--source include/explain-no-costs.inc
explain format=json
select * from t3,t4 where t3.a=t4.a and (t4.b+1 <= t3.b+1);
--source include/analyze-format.inc
@@ -405,6 +456,7 @@ insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int);
insert into t1 select a,a from t0;
+--source include/explain-no-costs.inc
explain format=json
select a, (select max(a) from t1 where t0.a<5 and t1.b<t0.a) from t0;
drop table t0,t1;
@@ -415,8 +467,11 @@ drop table t0,t1;
create table t1 (a int, b int);
insert into t1 values (1,2),(3,4),(2,3);
+--source include/explain-no-costs.inc
explain format=json select * from t1 order by a, b desc;
+--source include/explain-no-costs.inc
explain format=json select * from t1 order by a desc, b desc;
+--source include/explain-no-costs.inc
explain format=json select * from t1 order by a desc, b ;
drop table t1;
@@ -426,6 +481,7 @@ drop table t1;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2);
+--source include/explain-no-costs.inc
explain FORMAT=JSON
SELECT * FROM t1 t0
WHERE t0.a IN (SELECT t2.a FROM t1 t2 WHERE t0.a IN (SELECT t3.a FROM t1 t3));
@@ -443,4 +499,4 @@ INSERT INTO t2 VALUES
('00:13:41',8),('00:13:42',9);
SET optimizer_trace = 'enabled=on';
SELECT * FROM t1 WHERE a IN ( SELECT b FROM t2 INNER JOIN t1 ON (a = pk) );
-DROP TABLE t1, t2;
\ No newline at end of file
+DROP TABLE t1, t2;
diff --git a/mysql-test/main/explain_json_format_partitions.result b/mysql-test/main/explain_json_format_partitions.result
index b76fe29625e..858b2539eb0 100644
--- a/mysql-test/main/explain_json_format_partitions.result
+++ b/mysql-test/main/explain_json_format_partitions.result
@@ -12,13 +12,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"partitions": ["p0"],
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a in (2,3,4)"
}
@@ -34,6 +37,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -42,9 +46,11 @@ ANALYZE
"table_name": "t1",
"partitions": ["p0"],
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
diff --git a/mysql-test/main/explain_json_format_partitions.test b/mysql-test/main/explain_json_format_partitions.test
index 4c7d3f165d1..17972838777 100644
--- a/mysql-test/main/explain_json_format_partitions.test
+++ b/mysql-test/main/explain_json_format_partitions.test
@@ -7,6 +7,7 @@ create table t1 (
) partition by key(a);
insert into t1 select a from t2;
explain partitions select * from t1 where a in (2,3,4);
+--source include/explain-no-costs.inc
explain format=json select * from t1 where a in (2,3,4);
--source include/analyze-format.inc
analyze format=json select * from t1 where a in (2,3,4);
diff --git a/mysql-test/main/explain_json_innodb.result b/mysql-test/main/explain_json_innodb.result
index 871c0d6a258..d59b8aac0c9 100644
--- a/mysql-test/main/explain_json_innodb.result
+++ b/mysql-test/main/explain_json_innodb.result
@@ -16,12 +16,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -30,12 +33,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tbl_alias1",
"access_type": "ALL",
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "tbl_alias1.column_name_2 is not null and tbl_alias1.column_name_1 is not null"
}
@@ -52,10 +58,11 @@ EXPLAIN
"test.tbl_alias1.column_name_2",
"test.tbl_alias1.column_name_1"
],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
- "attached_condition": "tbl_alias2.c = tbl_alias1.column_name_2",
- "using_index": true
+ "attached_condition": "tbl_alias2.c = tbl_alias1.column_name_2"
}
}
]
diff --git a/mysql-test/main/explain_json_innodb.test b/mysql-test/main/explain_json_innodb.test
index f70df5d5349..68e5ab8198d 100644
--- a/mysql-test/main/explain_json_innodb.test
+++ b/mysql-test/main/explain_json_innodb.test
@@ -20,6 +20,7 @@ INSERT INTO t2 VALUES (3,'United States');
CREATE TABLE t3 (b INT, c VARCHAR(3), PRIMARY KEY (c,b)) ENGINE=InnoDB;
INSERT INTO t3 VALUES (4,'USA'),(5,'CAN');
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE 0 < ALL (
SELECT tbl_alias1.column_name_1 FROM t2 AS tbl_alias1, t3 AS tbl_alias2
WHERE tbl_alias2.b = tbl_alias1.column_name_1 AND tbl_alias2.c = tbl_alias1.column_name_2
diff --git a/mysql-test/main/explain_non_select.result b/mysql-test/main/explain_non_select.result
index d1e7af6afde..7dea232f260 100644
--- a/mysql-test/main/explain_non_select.result
+++ b/mysql-test/main/explain_non_select.result
@@ -145,7 +145,7 @@ DROP TABLE t1;
create table t1 (i int);
explain partitions update t1 set i = 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 0
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
create table t2 (a int, b int) partition by hash(a) partitions 5;
insert into t2 values (0,0),(1,1),(2,2),(3,3),(4,4);
explain partitions update t2 set b=3 where a in (3,4);
diff --git a/mysql-test/main/features.result b/mysql-test/main/features.result
index 0da2e5fe986..f4caba6d317 100644
--- a/mysql-test/main/features.result
+++ b/mysql-test/main/features.result
@@ -11,6 +11,8 @@ Feature_dynamic_columns 0
Feature_fulltext 0
Feature_gis 0
Feature_insert_returning 0
+Feature_into_outfile 0
+Feature_into_variable 0
Feature_invisible_columns 0
Feature_json 0
Feature_locale 0
@@ -187,3 +189,23 @@ drop table t1;
show status like "feature_insert_returning";
Variable_name Value
Feature_insert_returning 1
+#
+# Feature into outfile/variables
+#
+create table t1(id1 int);
+insert into t1 values (1),(2);
+select * into outfile '../../tmp/features_outfile.1' from t1;
+select * from t1 into outfile '../../tmp/features_outfile.2';
+select id1 INTO @x from t1 where id1=1;
+select * from t1 where id1=1 into @y;
+select * from t1 where id1=@x;
+id1
+1
+select @x=@y;
+@x=@y
+1
+drop table t1;
+show status like "feature_into_%";
+Variable_name Value
+Feature_into_outfile 4
+Feature_into_variable 2
diff --git a/mysql-test/main/features.test b/mysql-test/main/features.test
index 14c86255c37..e996223226f 100644
--- a/mysql-test/main/features.test
+++ b/mysql-test/main/features.test
@@ -148,3 +148,19 @@ create table t1(id1 int);
insert into t1 values (1),(2) returning *;
drop table t1;
show status like "feature_insert_returning";
+
+--echo #
+--echo # Feature into outfile/variables
+--echo #
+create table t1(id1 int);
+insert into t1 values (1),(2);
+select * into outfile '../../tmp/features_outfile.1' from t1;
+select * from t1 into outfile '../../tmp/features_outfile.2';
+select id1 INTO @x from t1 where id1=1;
+select * from t1 where id1=1 into @y;
+select * from t1 where id1=@x;
+select @x=@y;
+drop table t1;
+--remove_file $MYSQLTEST_VARDIR/tmp/features_outfile.1
+--remove_file $MYSQLTEST_VARDIR/tmp/features_outfile.2
+show status like "feature_into_%";
diff --git a/mysql-test/main/fetch_first.result b/mysql-test/main/fetch_first.result
index df182381d1c..1be71f24582 100644
--- a/mysql-test/main/fetch_first.result
+++ b/mysql-test/main/fetch_first.result
@@ -435,14 +435,60 @@ id first_name last_name score
7 Bob Trasc 9
2 John Doe 6
6 John Elton 8.1
+analyze FORMAT=JSON
select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
+offset 2 rows
+fetch first 3 rows only;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "read_sorted_file": {
+ "r_rows": 5,
+ "filesort": {
+ "sort_key": "t1.first_name, t1.last_name, t1.score",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "r_limit": 5,
+ "r_used_priority_queue": true,
+ "r_output_rows": 6,
+ "r_sort_mode": "sort_key,addon_fields",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 8,
+ "r_rows": 8,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ }
+ }
+ }
+ }
+ ]
+ }
+}
+select * from t1
+order by first_name, last_name, score
offset 2 rows
fetch first 3 rows only;
id first_name last_name score
2 John Doe 6
6 John Elton 8.1
-5 John Smith 7
+4 John Smith 6
select * from t1
order by first_name, last_name
offset 2 rows
@@ -454,7 +500,7 @@ id first_name last_name score
4 John Smith 6
5 John Smith 7
select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
offset 3 rows
fetch first 3 rows only;
id first_name last_name score
@@ -791,6 +837,14 @@ fetch first 2 rows with ties;
first_name last_name
Alice Fowler
Bob Trasc
+explain select first_name, last_name
+from t1
+where first_name != 'John'
+group by first_name, last_name
+order by first_name
+fetch first 2 rows with ties;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range t1_name t1_name 206 NULL 3 Using where; Using index for group-by
select first_name, last_name
from t1
where first_name != 'John'
@@ -815,7 +869,7 @@ select * from temp_table
order by first_name, last_name;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 Using filesort
-2 DERIVED t1 range t1_name t1_name 103 NULL 3 Using where; Using index
+2 DERIVED t1 range t1_name t1_name 206 NULL 3 Using where; Using index for group-by
with temp_table as (
select first_name, last_name
from t1
@@ -1007,12 +1061,12 @@ id first_name last_name score
3 John Smith 6
select * from t1 order by first_name desc fetch first 3 rows with ties;
id first_name last_name score
-8 Silvia Ganush 10
-2 John Doe 6
-3 John Smith 6
-4 John Smith 6
-5 John Smith 7
-6 John Elton 8.1
+# Silvia # #
+# John # #
+# John # #
+# John # #
+# John # #
+# John # #
(select * from t1 order by 1 fetch first 3 rows with ties)
intersect
(select * from t1 order by first_name desc fetch first 3 rows with ties)
@@ -1399,7 +1453,7 @@ a b
3 zzz
EXPLAIN SELECT DISTINCT a, b FROM t1 ORDER BY a FETCH FIRST 3 ROWS WITH TIES;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using temporary; Using filesort
+1 SIMPLE t1 index NULL a 5 NULL 3 Using temporary
SELECT DISTINCT a, b FROM t1 ORDER BY a FETCH FIRST 3 ROWS WITH TIES;
a b
2 foo
diff --git a/mysql-test/main/fetch_first.test b/mysql-test/main/fetch_first.test
index 98bbf1ca06b..1f4b46011b5 100644
--- a/mysql-test/main/fetch_first.test
+++ b/mysql-test/main/fetch_first.test
@@ -349,8 +349,15 @@ order by first_name, last_name
offset 1 rows
fetch first 3 rows with ties;
+--source include/analyze-format.inc
+analyze FORMAT=JSON
select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
+offset 2 rows
+fetch first 3 rows only;
+
+select * from t1
+order by first_name, last_name, score
offset 2 rows
fetch first 3 rows only;
@@ -360,7 +367,7 @@ offset 2 rows
fetch first 3 rows with ties;
select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
offset 3 rows
fetch first 3 rows only;
@@ -614,7 +621,12 @@ where first_name != 'John'
order by first_name
fetch first 2 rows with ties;
-
+explain select first_name, last_name
+from t1
+where first_name != 'John'
+group by first_name, last_name
+order by first_name
+fetch first 2 rows with ties;
select first_name, last_name
from t1
@@ -769,6 +781,7 @@ fetch first 1 row with ties;
--echo # Test union-like operator with multiple fetch first clauses.
--echo #
select * from t1 order by 1 fetch first 3 rows with ties;
+--replace_column 1 # 3 # 4 #
select * from t1 order by first_name desc fetch first 3 rows with ties;
--sorted_result
diff --git a/mysql-test/main/filesort_debug.result b/mysql-test/main/filesort_debug.result
index 4aa40592be7..0280d378778 100644
--- a/mysql-test/main/filesort_debug.result
+++ b/mysql-test/main/filesort_debug.result
@@ -63,6 +63,7 @@ c27 TEXT,
c28 TEXT,
primary key (pk)
);
+insert into t1 (pk) values (1),(2),(3);
CALL mtr.add_suppression("Out of sort memory");
DELETE IGNORE FROM t1 ORDER BY c26,c7,c23,c4,c25,c5,c20,
c19,c21,c8,c1,c27,c28,c3,c9,c22,c24,c6,c2,pk LIMIT 2;
diff --git a/mysql-test/main/filesort_debug.test b/mysql-test/main/filesort_debug.test
index a8833617c09..d5dc6a89507 100644
--- a/mysql-test/main/filesort_debug.test
+++ b/mysql-test/main/filesort_debug.test
@@ -86,6 +86,8 @@ CREATE TABLE t1 (
primary key (pk)
);
+insert into t1 (pk) values (1),(2),(3);
+
CALL mtr.add_suppression("Out of sort memory");
--error ER_OUT_OF_SORTMEMORY
diff --git a/mysql-test/main/flush-innodb.result b/mysql-test/main/flush-innodb.result
index 2c886e4f9fc..5df79e4cc0c 100644
--- a/mysql-test/main/flush-innodb.result
+++ b/mysql-test/main/flush-innodb.result
@@ -8,9 +8,6 @@ DROP TABLE t1;
# WL#6168: FLUSH TABLES ... FOR EXPORT -- parser
#
-# Requires innodb_file_per_table
-SET @old_innodb_file_per_table= @@GLOBAL.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table= 1;
# new "EXPORT" keyword is a valid user variable name:
SET @export = 10;
# new "EXPORT" keyword is a valid SP parameter name:
@@ -300,6 +297,4 @@ DROP TABLE t1;
connection con1;
disconnect con1;
connection default;
-# Reset innodb_file_per_table
-SET GLOBAL innodb_file_per_table= @old_innodb_file_per_table;
# End of 5.6 tests
diff --git a/mysql-test/main/flush-innodb.test b/mysql-test/main/flush-innodb.test
index 7665ae5e077..fcb0608373e 100644
--- a/mysql-test/main/flush-innodb.test
+++ b/mysql-test/main/flush-innodb.test
@@ -18,10 +18,6 @@ DROP TABLE t1;
--echo #
--echo
---echo # Requires innodb_file_per_table
-SET @old_innodb_file_per_table= @@GLOBAL.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table= 1;
-
--echo # new "EXPORT" keyword is a valid user variable name:
SET @export = 10;
@@ -432,7 +428,4 @@ DROP TABLE t1;
--source include/wait_until_disconnected.inc
--connection default
---echo # Reset innodb_file_per_table
-SET GLOBAL innodb_file_per_table= @old_innodb_file_per_table;
-
--echo # End of 5.6 tests
diff --git a/mysql-test/main/fulltext.result b/mysql-test/main/fulltext.result
index 3acde6121ec..3a338fdc847 100644
--- a/mysql-test/main/fulltext.result
+++ b/mysql-test/main/fulltext.result
@@ -594,6 +594,9 @@ CREATE TABLE t2 (a int, b2 char(10), FULLTEXT KEY b2 (b2));
INSERT INTO t2 VALUES (1,'Scargill');
CREATE TABLE t3 (a int, b int);
INSERT INTO t3 VALUES (1,1), (2,1);
+SELECT * FROM t2 where MATCH(b2) AGAINST('scargill' IN BOOLEAN MODE);
+a b2
+1 Scargill
# t2 should use full text index
EXPLAIN
SELECT count(*) FROM t1 WHERE
@@ -603,8 +606,8 @@ WHERE t3.a=t1.a AND MATCH(b2) AGAINST('scargill' IN BOOLEAN MODE)
);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t2 fulltext b2 b2 0 1 Using where
-2 MATERIALIZED t3 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY t2 fulltext b2 b2 0 1 Using where
+2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 Using where
# should return 0
SELECT count(*) FROM t1 WHERE
not exists(
diff --git a/mysql-test/main/fulltext.test b/mysql-test/main/fulltext.test
index 8b90c9cd81d..ef690be2314 100644
--- a/mysql-test/main/fulltext.test
+++ b/mysql-test/main/fulltext.test
@@ -547,6 +547,8 @@ INSERT INTO t2 VALUES (1,'Scargill');
CREATE TABLE t3 (a int, b int);
INSERT INTO t3 VALUES (1,1), (2,1);
+SELECT * FROM t2 where MATCH(b2) AGAINST('scargill' IN BOOLEAN MODE);
+
--echo # t2 should use full text index
EXPLAIN
SELECT count(*) FROM t1 WHERE
diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result
index 8f9c27eeb86..de64d01ed4c 100644
--- a/mysql-test/main/func_group.result
+++ b/mysql-test/main/func_group.result
@@ -1820,7 +1820,7 @@ CREATE TABLE t1(f1 YEAR(4));
INSERT INTO t1 VALUES (0000),(2001);
(SELECT MAX(f1) FROM t1) UNION (SELECT MAX(f1) FROM t1);
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def MAX(f1) MAX(f1) 13 4 4 Y 32864 0 63
+def MAX(f1) MAX(f1) 13 4 4 Y 49248 0 63
MAX(f1)
2001
DROP TABLE t1;
@@ -1856,9 +1856,8 @@ NULL
EXPLAIN EXTENDED
SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT a,b FROM t2 WHERE b<5) and a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; FirstMatch
1 PRIMARY t1 range a a 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
Warnings:
Note 1003 select max(`test`.`t1`.`a`) AS `MAX(a)` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = 1 and `test`.`t2`.`b` = 2 and `test`.`t1`.`a` < 10
SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT a,b FROM t2 WHERE b<5) and a<10;
diff --git a/mysql-test/main/func_group_innodb.result b/mysql-test/main/func_group_innodb.result
index f5a823e4638..9f69f424f33 100644
--- a/mysql-test/main/func_group_innodb.result
+++ b/mysql-test/main/func_group_innodb.result
@@ -246,12 +246,12 @@ INSERT INTO t1(a, b, c) VALUES
('', 'a', 1), ('', 'a', 1), ('', 'a', 2), ('', 'a', 2), ('', 'a', 3),
('', 'a', 3), ('', 'a', 4), ('', 'a', 4), ('', 'a', 5), ('', 'a', 5);
ANALYZE TABLE t1;
-SELECT MIN(c) FROM t1 GROUP BY b;
-MIN(c)
-0
EXPLAIN SELECT MIN(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL b 263 NULL 2 Using index for group-by
+SELECT MIN(c) FROM t1 GROUP BY b;
+MIN(c)
+0
DROP TABLE t1;
#
# MDEV-17589: Stack-buffer-overflow with indexed varchar (utf8) field
diff --git a/mysql-test/main/func_group_innodb.test b/mysql-test/main/func_group_innodb.test
index b1f9a28b190..ca6b083848c 100644
--- a/mysql-test/main/func_group_innodb.test
+++ b/mysql-test/main/func_group_innodb.test
@@ -199,8 +199,8 @@ INSERT INTO t1(a, b, c) VALUES
ANALYZE TABLE t1;
-- enable_result_log
-SELECT MIN(c) FROM t1 GROUP BY b;
EXPLAIN SELECT MIN(c) FROM t1 GROUP BY b;
+SELECT MIN(c) FROM t1 GROUP BY b;
DROP TABLE t1;
diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result
index b912cfa7cd3..a477c3c2ec4 100644
--- a/mysql-test/main/func_str.result
+++ b/mysql-test/main/func_str.result
@@ -971,17 +971,17 @@ explain extended select length('\n\t\r\b\0\_\%\\');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select octet_length('\n \r\0008\0\\_\\%\\') AS `length('\n\t\r\b\0\_\%\\')`
+Note 1003 select octet_length('\n\t\r\b\0\\_\\%\\') AS `length('\n\t\r\b\0\_\%\\')`
explain extended select bit_length('\n\t\r\b\0\_\%\\');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select bit_length('\n \r\0008\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
+Note 1003 select bit_length('\n\t\r\b\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
explain extended select bit_length('\n\t\r\b\0\_\%\\');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select bit_length('\n \r\0008\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
+Note 1003 select bit_length('\n\t\r\b\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
explain extended select concat('monty',' was here ','again');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
diff --git a/mysql-test/main/func_time.result b/mysql-test/main/func_time.result
index 7188c5de205..c3c2a883270 100644
--- a/mysql-test/main/func_time.result
+++ b/mysql-test/main/func_time.result
@@ -3803,21 +3803,13 @@ SET @sav_slow_query_log= @@session.slow_query_log;
SET @@session.slow_query_log= ON;
SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @ts_func;
SELECT a FROM t_ts LIMIT 1 into @ts_func;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a FROM t_trig LIMIT 1 into @ts_trig;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
DELETE FROM t_ts;
DELETE FROM t_trig;
SET @@session.slow_query_log= OFF;
SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @func_ts;
SELECT a FROM t_ts LIMIT 1 into @ts_func;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a FROM t_trig LIMIT 1 into @ts_trig;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SET @@session.slow_query_log= @sav_slow_query_log;
DROP FUNCTION fn_sleep_before_now;
DROP TRIGGER trg_insert_t_ts;
diff --git a/mysql-test/main/grant.result b/mysql-test/main/grant.result
index d89bf9f075a..9d95f2fa478 100644
--- a/mysql-test/main/grant.result
+++ b/mysql-test/main/grant.result
@@ -630,7 +630,7 @@ Select Tables To retrieve rows from table
Show databases Server Admin To see all databases with SHOW DATABASES
Show view Tables To see views with SHOW CREATE VIEW
Shutdown Server Admin To shut down the server
-Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.
+Super Server Admin To set few server variables
Trigger Tables To use triggers
Create tablespace Server Admin To create/alter/drop tablespaces
Update Tables To update existing rows
@@ -1472,8 +1472,6 @@ declare tmp varchar(30);
select col1 from test limit 1 into tmp;
return '1';
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create view v1 as select test.* from test where test.col1=test_function();
grant update (col1) on v1 to 'greg'@'localhost';
drop user 'greg'@'localhost';
diff --git a/mysql-test/main/grant2.result b/mysql-test/main/grant2.result
index 6ba7ddf1e0a..ebf83272713 100644
--- a/mysql-test/main/grant2.result
+++ b/mysql-test/main/grant2.result
@@ -161,7 +161,7 @@ connection con10;
set sql_log_off = 1;
ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) for this operation
set sql_log_bin = 0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect con10;
connection default;
delete from mysql.user where user like 'mysqltest\_1';
@@ -452,8 +452,6 @@ INSERT INTO t2 VALUES (1);
DROP FUNCTION IF EXISTS f2;
CREATE FUNCTION f2 () RETURNS INT
BEGIN DECLARE v INT; SELECT s1 FROM t2 INTO v; RETURN v; END//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f2();
f2()
1
diff --git a/mysql-test/main/grant_binlog_replay.result b/mysql-test/main/grant_binlog_replay.result
index 2c71e70e59b..ea5e14b587d 100644
--- a/mysql-test/main/grant_binlog_replay.result
+++ b/mysql-test/main/grant_binlog_replay.result
@@ -5,15 +5,15 @@
# MDEV-21975 Add BINLOG REPLAY privilege and bind new privileges to gtid_seq_no, preudo_thread_id, server_id, gtid_domain_id
#
#
-# Test that binlog replay statements are not allowed without BINLOG REPLAY or SUPER
+# Test that binlog replay statements are not allowed without BINLOG REPLAY
#
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG REPLAY, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
connect con1,localhost,user1,,;
connection con1;
BINLOG '';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
disconnect con1;
connection default;
DROP USER user1@localhost;
@@ -33,20 +33,5 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
#
-# Test that binlog replay statements are allowed with SUPER
-#
-CREATE USER user1@localhost IDENTIFIED BY '';
-GRANT SUPER ON *.* TO user1@localhost;
-SHOW GRANTS FOR user1@localhost;
-Grants for user1@localhost
-GRANT SUPER ON *.* TO `user1`@`localhost`
-connect con1,localhost,user1,,;
-connection con1;
-BINLOG '';
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use
-disconnect con1;
-connection default;
-DROP USER user1@localhost;
-#
# End of 10.5 tests
#
diff --git a/mysql-test/main/grant_binlog_replay.test b/mysql-test/main/grant_binlog_replay.test
index a3078e5023c..3fd157dfbf0 100644
--- a/mysql-test/main/grant_binlog_replay.test
+++ b/mysql-test/main/grant_binlog_replay.test
@@ -9,12 +9,12 @@
--echo #
--echo #
---echo # Test that binlog replay statements are not allowed without BINLOG REPLAY or SUPER
+--echo # Test that binlog replay statements are not allowed without BINLOG REPLAY
--echo #
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG REPLAY, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
connect (con1,localhost,user1,,);
connection con1;
@@ -25,7 +25,6 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
--echo #
--echo # Test that binlog replay statements are allowed with BINLOG REPLAY
--echo #
@@ -46,28 +45,6 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
---echo #
---echo # Test that binlog replay statements are allowed with SUPER
---echo #
-
-CREATE USER user1@localhost IDENTIFIED BY '';
-GRANT SUPER ON *.* TO user1@localhost;
-SHOW GRANTS FOR user1@localhost;
-
-connect (con1,localhost,user1,,);
-connection con1;
---error ER_BAD_SLAVE
-# The below fails with a syntax error.
-# This is fine. It's only important that it does not fail on "access denied".
---error ER_SYNTAX_ERROR
-BINLOG '';
---enable_result_log
-disconnect con1;
-
-connection default;
-DROP USER user1@localhost;
-
--echo #
--echo # End of 10.5 tests
--echo #
diff --git a/mysql-test/main/grant_kill.result b/mysql-test/main/grant_kill.result
index e1243a39a70..39a7e8fd5cb 100644
--- a/mysql-test/main/grant_kill.result
+++ b/mysql-test/main/grant_kill.result
@@ -5,13 +5,13 @@
# MDEV-21743 Split up SUPER privilege to smaller privileges
#
#
-# Test that KILL is not allowed without CONNECTION ADMIN or SUPER
+# Test that KILL is not allowed without CONNECTION ADMIN
#
CREATE USER foo@localhost;
GRANT SELECT ON *.* TO foo@localhost;
CREATE USER bar@localhost;
GRANT ALL PRIVILEGES ON *.* TO bar@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM bar@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM bar@localhost;
connect foo,localhost,foo,,;
connect bar,localhost,bar,,;
SELECT user FROM information_schema.processlist ORDER BY user;
@@ -47,25 +47,5 @@ disconnect bar;
DROP USER foo@localhost;
DROP USER bar@localhost;
#
-# Test that KILL is allowed with SUPER
-#
-CREATE USER foo@localhost;
-GRANT SELECT ON *.* TO foo@localhost;
-CREATE USER bar@localhost;
-GRANT PROCESS, SUPER ON *.* TO bar@localhost;
-connect foo,localhost,foo,,;
-connect bar,localhost,bar,,;
-SELECT user FROM information_schema.processlist ORDER BY user;
-user
-bar
-foo
-root
-KILL ID;
-connection default;
-disconnect foo;
-disconnect bar;
-DROP USER foo@localhost;
-DROP USER bar@localhost;
-#
# End of 10.5 tests
#
diff --git a/mysql-test/main/grant_kill.test b/mysql-test/main/grant_kill.test
index 75a25743dc9..4cd7a84d18d 100644
--- a/mysql-test/main/grant_kill.test
+++ b/mysql-test/main/grant_kill.test
@@ -12,14 +12,14 @@
--let $count_sessions=1
--echo #
---echo # Test that KILL is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that KILL is not allowed without CONNECTION ADMIN
--echo #
CREATE USER foo@localhost;
GRANT SELECT ON *.* TO foo@localhost;
CREATE USER bar@localhost;
GRANT ALL PRIVILEGES ON *.* TO bar@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM bar@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM bar@localhost;
--connect (foo,localhost,foo,,)
let $id=`(SELECT id FROM INFORMATION_SCHEMA.PROCESSLIST WHERE user='foo')`;
--connect (bar,localhost,bar,,)
@@ -59,31 +59,6 @@ let $wait_condition=
DROP USER foo@localhost;
DROP USER bar@localhost;
---echo #
---echo # Test that KILL is allowed with SUPER
---echo #
-
-CREATE USER foo@localhost;
-GRANT SELECT ON *.* TO foo@localhost;
-CREATE USER bar@localhost;
-GRANT PROCESS, SUPER ON *.* TO bar@localhost;
---connect (foo,localhost,foo,,)
-let $id=`(SELECT id FROM INFORMATION_SCHEMA.PROCESSLIST WHERE user='foo')`;
---connect (bar,localhost,bar,,)
-SELECT user FROM information_schema.processlist ORDER BY user;
---replace_result $id ID
---eval KILL $id
---connection default
-let $wait_condition=
- select count(*) = 0 from information_schema.processlist
- where user = "foo";
---source include/wait_condition.inc
---disconnect foo
---disconnect bar
---source include/wait_until_count_sessions.inc
-DROP USER foo@localhost;
-DROP USER bar@localhost;
-
--enable_service_connection
--echo #
diff --git a/mysql-test/main/grant_server.result b/mysql-test/main/grant_server.result
index 37b5f67ba4b..b8094f4b1d7 100644
--- a/mysql-test/main/grant_server.result
+++ b/mysql-test/main/grant_server.result
@@ -12,11 +12,11 @@ connection con1;
CREATE SERVER IF NOT EXISTS server_1
FOREIGN DATA WRAPPER mysql
OPTIONS (USER 'mysqltest_1', HOST 'localhost', DATABASE 'test2');
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
ALTER SERVER server_1 OPTIONS(HOST 'Server.Example.Org');
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
DROP SERVER server_1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
disconnect con1;
connection default;
DROP USER user1@localhost;
@@ -39,7 +39,8 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
#
-# Test that SERVER DDL statements are allowed with SUPER
+# Test that SERVER DDL statements are not allowed with SUPER
+# but only with FEDERATED ADMIN
#
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT SUPER ON *.* TO user1@localhost;
@@ -51,6 +52,15 @@ connection con1;
CREATE SERVER IF NOT EXISTS server_1
FOREIGN DATA WRAPPER mysql
OPTIONS (USER 'mysqltest_1', HOST 'localhost', DATABASE 'test2');
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
+disconnect con1;
+connection default;
+REVOKE SUPER ON *.* FROM user1@localhost;
+GRANT FEDERATED ADMIN ON *.* TO user1@localhost;
+connect con1,localhost,user1,,;
+CREATE SERVER IF NOT EXISTS server_1
+FOREIGN DATA WRAPPER mysql
+OPTIONS (USER 'mysqltest_1', HOST 'localhost', DATABASE 'test2');
ALTER SERVER server_1 OPTIONS(HOST 'Server.Example.Org');
DROP SERVER server_1;
disconnect con1;
diff --git a/mysql-test/main/grant_server.test b/mysql-test/main/grant_server.test
index 58c6b4e9ab6..f4b61135a7d 100644
--- a/mysql-test/main/grant_server.test
+++ b/mysql-test/main/grant_server.test
@@ -27,7 +27,6 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
--echo #
--echo # Test that SERVER DDL statements are allowed with FEDERATED ADMIN
--echo #
@@ -48,9 +47,9 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
--echo #
---echo # Test that SERVER DDL statements are allowed with SUPER
+--echo # Test that SERVER DDL statements are not allowed with SUPER
+--echo # but only with FEDERATED ADMIN
--echo #
CREATE USER user1@localhost IDENTIFIED BY '';
@@ -59,6 +58,15 @@ SHOW GRANTS FOR user1@localhost;
connect (con1,localhost,user1,,);
connection con1;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+CREATE SERVER IF NOT EXISTS server_1
+ FOREIGN DATA WRAPPER mysql
+ OPTIONS (USER 'mysqltest_1', HOST 'localhost', DATABASE 'test2');
+disconnect con1;
+connection default;
+REVOKE SUPER ON *.* FROM user1@localhost;
+GRANT FEDERATED ADMIN ON *.* TO user1@localhost;
+connect (con1,localhost,user1,,);
CREATE SERVER IF NOT EXISTS server_1
FOREIGN DATA WRAPPER mysql
OPTIONS (USER 'mysqltest_1', HOST 'localhost', DATABASE 'test2');
@@ -69,7 +77,6 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
--echo #
--echo # End of 10.5 tests
--echo #
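For quick reference, a hedged sketch of the privilege flow the updated test enforces: SUPER no longer authorizes SERVER DDL, FEDERATED ADMIN does. The account and OPTIONS values are illustrative only.

CREATE USER ddl_user@localhost;
GRANT FEDERATED ADMIN ON *.* TO ddl_user@localhost;
-- as ddl_user@localhost, all three statements now succeed:
CREATE SERVER server_1
  FOREIGN DATA WRAPPER mysql
  OPTIONS (USER 'remote_user', HOST '127.0.0.1', DATABASE 'test');
ALTER SERVER server_1 OPTIONS (HOST 'db.example.org');
DROP SERVER server_1;
-- cleanup, back on the default connection:
DROP USER ddl_user@localhost;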
diff --git a/mysql-test/main/grant_slave_admin.result b/mysql-test/main/grant_slave_admin.result
index 0f1f2c9985f..66fc02e98b8 100644
--- a/mysql-test/main/grant_slave_admin.result
+++ b/mysql-test/main/grant_slave_admin.result
@@ -5,19 +5,19 @@
# MDEV-21743 Split up SUPER privilege to smaller privileges
#
#
-# Test that slave admin statements are not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that slave admin statements are not allowed without REPLICATION SLAVE ADMIN
#
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect con1,localhost,user1,,;
connection con1;
START SLAVE;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
CHANGE MASTER TO MASTER_HOST='127.0.0.1';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
STOP SLAVE;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
disconnect con1;
connection default;
DROP USER user1@localhost;
@@ -41,24 +41,5 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
#
-# Test that slave admin statements are allowed with SUPER
-#
-CREATE USER user1@localhost IDENTIFIED BY '';
-GRANT SUPER ON *.* TO user1@localhost;
-SHOW GRANTS FOR user1@localhost;
-Grants for user1@localhost
-GRANT SUPER ON *.* TO `user1`@`localhost`
-connect con1,localhost,user1,,;
-connection con1;
-START SLAVE;
-ERROR HY000: Misconfigured slave: MASTER_HOST was not set; Fix in config file or with CHANGE MASTER TO
-CHANGE MASTER TO MASTER_USER='root';
-STOP SLAVE;
-Warnings:
-Note 1255 Slave already has been stopped
-disconnect con1;
-connection default;
-DROP USER user1@localhost;
-#
# End of 10.5 tests
#
diff --git a/mysql-test/main/grant_slave_admin.test b/mysql-test/main/grant_slave_admin.test
index d73c31e0cf2..f4099975d90 100644
--- a/mysql-test/main/grant_slave_admin.test
+++ b/mysql-test/main/grant_slave_admin.test
@@ -9,12 +9,12 @@
--echo #
--echo #
---echo # Test that slave admin statements are not allowed without REPLICATION SLAVE ADMIN or SUPER
+--echo # Test that slave admin statements are not allowed without REPLICATION SLAVE ADMIN
--echo #
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect (con1,localhost,user1,,);
connection con1;
@@ -29,7 +29,6 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
--echo #
--echo # Test that slave admin statements are allowed with REPLICATION SLAVE ADMIN
--echo #
@@ -49,26 +48,6 @@ disconnect con1;
connection default;
DROP USER user1@localhost;
-
---echo #
---echo # Test that slave admin statements are allowed with SUPER
---echo #
-
-CREATE USER user1@localhost IDENTIFIED BY '';
-GRANT SUPER ON *.* TO user1@localhost;
-SHOW GRANTS FOR user1@localhost;
-
-connect (con1,localhost,user1,,);
-connection con1;
---error ER_BAD_SLAVE
-START SLAVE;
-CHANGE MASTER TO MASTER_USER='root';
-STOP SLAVE;
-disconnect con1;
-
-connection default;
-DROP USER user1@localhost;
-
--echo #
--echo # End of 10.5 tests
--echo #
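The same pattern for replication control, sketched with a placeholder account: START SLAVE, STOP SLAVE and CHANGE MASTER are accepted with REPLICATION SLAVE ADMIN and rejected with SUPER alone.

CREATE USER repl_admin@localhost;
GRANT REPLICATION SLAVE ADMIN ON *.* TO repl_admin@localhost;
-- as repl_admin@localhost:
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='repl';
START SLAVE;
STOP SLAVE;
-- cleanup, back on the default connection:
DROP USER repl_admin@localhost;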
diff --git a/mysql-test/main/grant_slave_monitor.result b/mysql-test/main/grant_slave_monitor.result
index 68df790074a..78f6b23b1ff 100644
--- a/mysql-test/main/grant_slave_monitor.result
+++ b/mysql-test/main/grant_slave_monitor.result
@@ -1,19 +1,18 @@
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE SLAVE MONITOR, SUPER ON *.* FROM user1@localhost;
+REVOKE SLAVE MONITOR ON *.* FROM user1@localhost;
FLUSH PRIVILEGES;
connect con1,localhost,user1,,;
connection con1;
SHOW GRANTS;
Grants for user1@localhost
-GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, BINLOG MONITOR, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, DELETE HISTORY, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `user1`@`localhost`
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, BINLOG MONITOR, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, DELETE HISTORY, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `user1`@`localhost`
#
# Verify that having REPLICATION SLAVE ADMIN doesn't allow SHOW SLAVE STATUS
-# Expected error: Access denied; you need (at least one of) the SUPER, SLAVE
-# MONITOR privilege(s) for this operation
+# Expected error: Access denied; you need (at least one of) the SLAVE MONITOR privilege(s) for this operation
#
SHOW SLAVE STATUS;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SLAVE MONITOR privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SLAVE MONITOR privilege(s) for this operation
#
# Verify that having REPLICATION SLAVE ADMIN doesn't allow SHOW RELAYLOG EVENTS
# Expected error: Access denied; you need (at least one of) the REPLICA MONITOR
@@ -32,28 +31,13 @@ connect con1,localhost,user1,,;
connection con1;
SHOW GRANTS;
Grants for user1@localhost
-GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, BINLOG MONITOR, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, DELETE HISTORY, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user1`@`localhost`
+GRANT ALL PRIVILEGES ON *.* TO `user1`@`localhost`
SHOW SLAVE STATUS;
SHOW RELAYLOG EVENTS;
disconnect con1;
connection default;
DROP USER user1@localhost;
#
-# SHOW SLAVE STATUS command is allowed with SUPER privilege
-#
-CREATE USER user1@localhost IDENTIFIED BY '';
-GRANT SUPER ON *.* TO user1@localhost;
-connect con1,localhost,user1,,;
-SHOW SLAVE STATUS;
-#
-# SHOW RELAYLOG EVENTS is not allowed with SUPER privilege, it requires SLAVE MONITOR
-#
-SHOW RELAYLOG EVENTS;
-ERROR 42000: Access denied; you need (at least one of) the SLAVE MONITOR privilege(s) for this operation
-disconnect con1;
-connection default;
-DROP USER user1@localhost;
-#
# MDEV-25030 Upgrade to 10.5.9 breaks root's ability to grant
#
insert mysql.global_priv values ('bar', 'foo7', '{"access":274877906943,"version_id":100507,"plugin":"mysql_native_password","authentication_string":""}'),
@@ -68,7 +52,7 @@ Grants for foo8@bar
GRANT ALL PRIVILEGES ON *.* TO `foo8`@`bar` WITH GRANT OPTION
show grants for foo9@bar;
Grants for foo9@bar
-GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, BINLOG MONITOR, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, DELETE HISTORY, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `foo9`@`bar` WITH GRANT OPTION
+GRANT ALL PRIVILEGES ON *.* TO `foo9`@`bar` WITH GRANT OPTION
drop user foo7@bar, foo8@bar, foo9@bar;
#
# End of 10.5 tests
diff --git a/mysql-test/main/grant_slave_monitor.test b/mysql-test/main/grant_slave_monitor.test
index af4399b7150..b5e65ef2cb4 100644
--- a/mysql-test/main/grant_slave_monitor.test
+++ b/mysql-test/main/grant_slave_monitor.test
@@ -7,14 +7,12 @@
# ==== Implementation ====
#
# Step1: GRANT ALL privileges for a new user 'user1' and then REVOKE
-# SLAVE MONITOR and SUPER privileges.
+#           the SLAVE MONITOR privilege.
# Step2: Execute SHOW SLAVE STATUS/SHOW RELAYLOG EVENTS commands and expect
# ER_SPECIFIC_ACCESS_DENIED_ERROR. This also verifies that REPLICATION
# SLAVE ADMIN privilege is not required for these two commands.
# Step3: GRANT SLAVE MONITOR privilege and observe that both commands are
#        allowed to execute.
-# Step4: GRANT SUPER privilege and observe that only SHOW SLAVE STATUS command
-# is allowed.
#
# ==== References ====
#
@@ -27,7 +25,7 @@
CREATE USER user1@localhost IDENTIFIED BY '';
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE SLAVE MONITOR, SUPER ON *.* FROM user1@localhost;
+REVOKE SLAVE MONITOR ON *.* FROM user1@localhost;
FLUSH PRIVILEGES;
--connect(con1,localhost,user1,,)
@@ -36,8 +34,7 @@ SHOW GRANTS;
--echo #
--echo # Verify that having REPLICATION SLAVE ADMIN doesn't allow SHOW SLAVE STATUS
---echo # Expected error: Access denied; you need (at least one of) the SUPER, SLAVE
---echo # MONITOR privilege(s) for this operation
+--echo # Expected error: Access denied; you need (at least one of) the SLAVE MONITOR privilege(s) for this operation
--echo #
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
SHOW SLAVE STATUS;
@@ -77,30 +74,6 @@ SHOW RELAYLOG EVENTS;
DROP USER user1@localhost;
--echo #
---echo # SHOW SLAVE STATUS command is allowed with SUPER privilege
---echo #
-CREATE USER user1@localhost IDENTIFIED BY '';
-GRANT SUPER ON *.* TO user1@localhost;
-
---connect(con1,localhost,user1,,)
---disable_result_log
-SHOW SLAVE STATUS;
---enable_result_log
-
---echo #
---echo # SHOW RELAYLOG EVENTS is not allowed with SUPER privilege, it requires SLAVE MONITOR
---echo #
-
---disable_ps_protocol
---error ER_SPECIFIC_ACCESS_DENIED_ERROR
-SHOW RELAYLOG EVENTS;
---enable_ps_protocol
---disconnect con1
-
---connection default
-DROP USER user1@localhost;
-
---echo #
--echo # MDEV-25030 Upgrade to 10.5.9 breaks root's ability to grant
--echo #
insert mysql.global_priv values ('bar', 'foo7', '{"access":274877906943,"version_id":100507,"plugin":"mysql_native_password","authentication_string":""}'),
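Condensed, the hunks above say: SHOW SLAVE STATUS and SHOW RELAYLOG EVENTS both require SLAVE MONITOR, and SUPER grants neither any more. A hedged sketch with a placeholder account:

CREATE USER monitor_user@localhost;
GRANT SLAVE MONITOR ON *.* TO monitor_user@localhost;
-- as monitor_user@localhost, both monitoring commands are permitted:
SHOW SLAVE STATUS;
SHOW RELAYLOG EVENTS;
-- cleanup, back on the default connection:
DROP USER monitor_user@localhost;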
diff --git a/mysql-test/main/greedy_optimizer.result b/mysql-test/main/greedy_optimizer.result
index 55812cb6ff9..5ff3bd62e89 100644
--- a/mysql-test/main/greedy_optimizer.result
+++ b/mysql-test/main/greedy_optimizer.result
@@ -127,7 +127,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 4.284314
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -139,55 +139,55 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 4.284314
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.602062
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.602062
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.621783
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.621783
set optimizer_prune_level=0;
select @@optimizer_prune_level;
@@optimizer_prune_level
@@ -207,7 +207,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1371.437037
+Last_query_cost 2.998640
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
@@ -219,87 +219,87 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1371.437037
+Last_query_cost 2.998640
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.602062
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.602062
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.621783
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.621783
set optimizer_search_depth=1;
select @@optimizer_search_depth;
@@optimizer_search_depth
1
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+1 SIMPLE t7 index PRIMARY PRIMARY 4 NULL 21 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.c22 1
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.c42 1
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 42.599713
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+1 SIMPLE t7 index PRIMARY PRIMARY 4 NULL 21 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.c22 1
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.c42 1
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 42.599713
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
+1 SIMPLE t3 index PRIMARY PRIMARY 4 NULL 9 Using index
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
@@ -307,19 +307,19 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 14.817907
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
+1 SIMPLE t3 index PRIMARY PRIMARY 4 NULL 9 Using index
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 14.817907
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
@@ -331,7 +331,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 1.698747
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
@@ -343,7 +343,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 1.698747
set optimizer_search_depth=62;
select @@optimizer_search_depth;
@@optimizer_search_depth
@@ -359,7 +359,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1371.437037
+Last_query_cost 2.998640
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
@@ -371,55 +371,55 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1371.437037
+Last_query_cost 2.998640
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.602062
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.602062
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.621783
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
+1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 362.618727
+Last_query_cost 0.621783
set optimizer_prune_level=2;
select @@optimizer_prune_level;
@@optimizer_prune_level
@@ -439,7 +439,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 4.284314
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -451,87 +451,87 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 4.284314
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.602062
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.602062
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.621783
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.621783
set optimizer_search_depth=1;
select @@optimizer_search_depth;
@@optimizer_search_depth
1
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+1 SIMPLE t7 index PRIMARY PRIMARY 4 NULL 21 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.c22 1
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.c42 1
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 42.599713
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+1 SIMPLE t7 index PRIMARY PRIMARY 4 NULL 21 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.c22 1
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.c42 1
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 42.599713
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
+1 SIMPLE t3 index PRIMARY PRIMARY 4 NULL 9 Using index
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
@@ -539,19 +539,19 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 14.817907
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
+1 SIMPLE t3 index PRIMARY PRIMARY 4 NULL 9 Using index
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 14.817907
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
@@ -563,7 +563,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 1.698747
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
@@ -575,7 +575,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 1.698747
set optimizer_search_depth=62;
select @@optimizer_search_depth;
@@optimizer_search_depth
@@ -591,7 +591,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 4.284314
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c12 = t2.c21 and t2.c22 = t3.c31 and t3.c32 = t4.c41 and t4.c42 = t5.c51 and t5.c52 = t6.c61 and t6.c62 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -603,55 +603,55 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t6.c62 1 Using index
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 1693.637037
+Last_query_cost 4.284314
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.602062
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using index
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using index
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.602062
explain select t1.c11 from t1, t2, t3, t4, t5, t6, t7 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.621783
explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and t1.c12 = t3.c31 and t1.c13 = t4.c41 and t1.c14 = t5.c51 and t1.c15 = t6.c61 and t1.c16 = t7.c71 and t2.c22 = t3.c32 and t2.c23 = t4.c42 and t2.c24 = t5.c52 and t2.c25 = t6.c62 and t2.c26 = t7.c72 and t3.c33 = t4.c43 and t3.c34 = t5.c53 and t3.c35 = t6.c63 and t3.c36 = t7.c73 and t4.c42 = t5.c54 and t4.c43 = t6.c64 and t4.c44 = t7.c74 and t5.c52 = t6.c65 and t5.c53 = t7.c75 and t6.c62 = t7.c76;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.c21 1 Using where
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.c12 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t1.c14 1 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 4 test.t1.c16 1 Using where
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 844.037037
+Last_query_cost 0.621783
drop table t1,t2,t3,t4,t5,t6,t7;
CREATE TABLE t1 (a int, b int, d int, i int);
INSERT INTO t1 VALUES (1,1,1,1);
@@ -802,6 +802,9 @@ WHERE t100.K=t10.I
AND t10000.K=t10.I;
COUNT(*)
9
+#####
+# Expect all variants of EQ joining t100 & t10000 with T10
+# to have same cost # handler_reads:
flush status;
EXPLAIN SELECT COUNT(*) FROM t10,t100,t10000
WHERE t100.K=t10.I
@@ -909,6 +912,12 @@ AND t10000.K=t10.K;
COUNT(*)
9
### NOTE: Handler_reads: 31, expected: 30 ###
+#####
+## EQ_REF Should be executed before table scan(ALL)
+## - Independent of #records in table being EQ_REF-joined
+#####
+#####
+# Expect: Join EQ_REF(t100) before ALL(t10000)
flush status;
EXPLAIN SELECT STRAIGHT_JOIN COUNT(*) FROM t10,t100,t10000
WHERE t100.K=t10.I
@@ -948,6 +957,8 @@ WHERE t100.K=t10.I
AND t10000.I=t10.I;
COUNT(*)
9000
+#####
+# Expect: Join EQ_REF(t10000) before ALL(t100) (star-join)
flush status;
EXPLAIN SELECT STRAIGHT_JOIN COUNT(*) FROM t10,t10000,t100
WHERE t100.I=t10.I
@@ -1183,6 +1194,9 @@ AND t10000.I=t10.I;
COUNT(*)
9000
### NOTE: Handler_reads: 9030, expected: 9045 ###
+#####
+## EQ_REF & REF join two instances of t10000 with t10:
+#####
flush status;
EXPLAIN SELECT STRAIGHT_JOIN COUNT(*) FROM t10,t10000 x,t10000 y
WHERE x.k=t10.i;
@@ -2902,7 +2916,7 @@ EXPLAIN SELECT COUNT(*) FROM t1 AS x JOIN t1 ON t1.K=x.I JOIN t2 ON t2.K=x.I JOI
DROP TABLE t100, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22, t23, t24, t25, t26, t27, t28, t29, t30, t31, t32, t33, t34, t35, t36, t37, t38, t39, t40, t41, t42, t43, t44, t45, t46, t47, t48, t49, t50, t51, t52, t53, t54, t55, t56, t57, t58, t59, t60, t61;
show status like "optimizer%";
Variable_name Value
-Optimizer_join_prefixes_check_calls 57916
+Optimizer_join_prefixes_check_calls 63164
SET OPTIMIZER_SEARCH_DEPTH = DEFAULT;
#
# Bug found when testing greedy optimizer tests
@@ -2917,8 +2931,8 @@ explain SELECT * FROM t1 AS alias1
WHERE alias1.col_varchar_key IN (SELECT COUNT(*) FROM t1 AS SQ3_alias2 JOIN t1 AS SQ3_alias3 ON (SQ3_alias3.col_varchar_key = SQ3_alias2.col_varchar_key AND SQ3_alias3.pk = SQ3_alias2.pk));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index NULL col_varchar_key 20 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY SQ3_alias2 index PRIMARY,col_varchar_key col_varchar_key 20 NULL 2 Using index
-2 DEPENDENT SUBQUERY SQ3_alias3 eq_ref PRIMARY,col_varchar_key PRIMARY 4 test.SQ3_alias2.pk 1 Using where
+2 DEPENDENT SUBQUERY SQ3_alias2 index PRIMARY,col_varchar_key col_varchar_key 20 NULL 2 Using where; Using index
+2 DEPENDENT SUBQUERY SQ3_alias3 ref PRIMARY,col_varchar_key col_varchar_key 11 test.SQ3_alias2.col_varchar_key 1 Using where; Using index
drop table t1;
#
# This triggered an assert failure while testing
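The rescaled Last_query_cost values in this file come from the reworked optimizer cost model; the absolute numbers are in different units than before, so they are only comparable between plans produced by the same server build. A sketch of how one of these figures is reproduced by hand, assuming the t1..t7 tables created earlier in this test (their definitions are outside this excerpt):

SET optimizer_search_depth = 1;      -- greedy, depth-limited join ordering
EXPLAIN SELECT t1.c11 FROM t1, t2, t3, t4, t5, t6, t7
WHERE t1.c12 = t2.c21 AND t2.c22 = t3.c31 AND t3.c32 = t4.c41
  AND t4.c42 = t5.c51 AND t5.c52 = t6.c61 AND t6.c62 = t7.c71;
SHOW STATUS LIKE 'Last_query_cost';  -- cost of the plan just chosen
SET optimizer_search_depth = 62;     -- near-exhaustive search, for comparison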
diff --git a/mysql-test/main/greedy_optimizer.test b/mysql-test/main/greedy_optimizer.test
index 2a830c70677..8e90a91d7fa 100644
--- a/mysql-test/main/greedy_optimizer.test
+++ b/mysql-test/main/greedy_optimizer.test
@@ -441,9 +441,9 @@ WHERE t100.K=t10.I
AND t10000.K=t10.I;
--source include/check_qep.inc
-#####
-# Expect all variants of EQ joining t100 & t10000 with T10
-# to have same cost # handler_reads:
+--echo #####
+--echo # Expect all variants of EQ joining t100 & t10000 with T10
+--echo # to have same cost # handler_reads:
let $query=
SELECT COUNT(*) FROM t10,t100,t10000
WHERE t100.K=t10.I
@@ -493,12 +493,12 @@ WHERE t100.K=t10.I
--source include/check_qep.inc
-#####
-## EQ_REF Should be executed before table scan(ALL)
-## - Independent of #records in table being EQ_REF-joined
-#####
-#####
-# Expect: Join EQ_REF(t100) before ALL(t10000)
+--echo #####
+--echo ## EQ_REF Should be executed before table scan(ALL)
+--echo ## - Independent of #records in table being EQ_REF-joined
+--echo #####
+--echo #####
+--echo # Expect: Join EQ_REF(t100) before ALL(t10000)
let $query=
SELECT STRAIGHT_JOIN COUNT(*) FROM t10,t100,t10000
WHERE t100.K=t10.I
@@ -517,8 +517,8 @@ WHERE t100.K=t10.I
AND t10000.I=t10.I;
--source include/check_qep.inc
-#####
-# Expect: Join EQ_REF(t10000) before ALL(t100) (star-join)
+--echo #####
+--echo # Expect: Join EQ_REF(t10000) before ALL(t100) (star-join)
let $query=
SELECT STRAIGHT_JOIN COUNT(*) FROM t10,t10000,t100
WHERE t100.I=t10.I
@@ -654,9 +654,9 @@ WHERE t100.K=t10.I
--source include/check_qep.inc
-#####
-## EQ_REF & REF join two instances of t10000 with t10:
-#####
+--echo #####
+--echo ## EQ_REF & REF join two instances of t10000 with t10:
+--echo #####
#####
## Expect this QEP, cost & #handler_read
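The --echo conversion above only changes how the expectations are printed; the actual check (include/check_qep.inc) still compares Handler_read counters after running each query. The same counters can be inspected by hand, again assuming the t10/t100/t10000 tables from this test:

FLUSH STATUS;
SELECT STRAIGHT_JOIN COUNT(*) FROM t10, t100, t10000
WHERE t100.K = t10.I AND t10000.K = t10.I;
SHOW STATUS LIKE 'Handler_read%';    -- compare against the expected read counts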
diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result
index ba403fa8b73..ef5e500c314 100644
--- a/mysql-test/main/group_by.result
+++ b/mysql-test/main/group_by.result
@@ -552,12 +552,12 @@ a b
3 1
explain select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL a NULL NULL NULL 4 Using temporary; Using filesort
-1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
+1 SIMPLE t2 ref a a 4 test.t1.a 1
explain select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b ORDER BY NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL a NULL NULL NULL 4 Using temporary
-1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using temporary
+1 SIMPLE t2 ref a a 4 test.t1.a 1
drop table t1,t2;
SET @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity,@@optimizer_switch=@save_optimizer_switch;
create table t1 (a int, b int);
@@ -890,7 +890,8 @@ Level Code Message
drop table t1, t2;
CREATE TABLE t1 (a tinyint(3), b varchar(255), PRIMARY KEY (a));
INSERT INTO t1 VALUES (1,'-----'), (6,'Allemagne'), (17,'Autriche'),
-(25,'Belgique'), (54,'Danemark'), (62,'Espagne'), (68,'France');
+(25,'Belgique'), (54,'Danemark'), (62,'Espagne'), (68,'France'),
+(100,"No land"), (101,"No land");
CREATE TABLE t2 (a tinyint(3), b tinyint(3), PRIMARY KEY (a), KEY b (b));
INSERT INTO t2 VALUES (1,1), (2,1), (6,6), (18,17), (15,25), (16,25),
(17,25), (10,54), (5,62),(3,68);
@@ -1352,7 +1353,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY,i2 PRIMARY 4 NULL 1 Using where; Using index
EXPLAIN SELECT a FROM t1 WHERE a < 2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,i2 PRIMARY 4 NULL 1 Using where; Using index
+1 SIMPLE t1 range PRIMARY,i2 i2 4 NULL 1 Using where; Using index for group-by
EXPLAIN SELECT a FROM t1 IGNORE INDEX (PRIMARY,i2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 144
@@ -1578,7 +1579,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN SELECT a FROM t1 FORCE INDEX FOR JOIN (i2)
FORCE INDEX FOR GROUP BY (i2) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL i2 4 NULL 145 Using index for group-by
+1 SIMPLE t1 index NULL i2 9 NULL 144 Using index
EXPLAIN SELECT a FROM t1 USE INDEX () IGNORE INDEX (i2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 144
@@ -1701,7 +1702,7 @@ NULL 1
1 2
EXPLAIN SELECT a from t2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 5 NULL 7 Using index for group-by
+1 SIMPLE t2 range NULL a 5 NULL 6 Using index for group-by
SELECT a from t2 GROUP BY a;
a
NULL
@@ -1714,6 +1715,18 @@ b
NULL
1
2
+insert into t2 SELECT NULL, NULL from seq_1_to_10;
+EXPLAIN SELECT b from t2 GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range NULL a 5 NULL 9 Using index for group-by
+# Expect: Using index for group-by
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+EXPLAIN SELECT b from t2 GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range NULL a 5 NULL 6 Using index for group-by
DROP TABLE t1;
DROP TABLE t2;
CREATE TABLE t1 ( a INT, b INT );
@@ -1959,8 +1972,8 @@ SELECT a, AVG(t1.b),
FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 10 NULL 9 Using index
-3 DEPENDENT SUBQUERY t12 ref a a 10 func,func 2 Using index condition
-2 DEPENDENT SUBQUERY t11 ref a a 10 func,func 2 Using index condition
+3 DEPENDENT SUBQUERY t12 ref a a 10 func,func 1 Using index condition
+2 DEPENDENT SUBQUERY t11 ref a a 10 func,func 1 Using index condition
SELECT a, AVG(t1.b),
(SELECT t11.c FROM t1 t11 WHERE t11.a = t1.a AND t11.b = AVG(t1.b)) AS t11c,
(SELECT t12.c FROM t1 t12 WHERE t12.a = t1.a AND t12.b = AVG(t1.b)) AS t12c
@@ -2452,7 +2465,7 @@ test.t1 analyze status OK
EXPLAIN SELECT SQL_BUFFER_RESULT MIN(a), b FROM t1 WHERE t1.b = 'a' GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 9 NULL 2 Using where; Using index for group-by; Using temporary
+1 SIMPLE t1 range b b 9 NULL 1 Using where; Using index for group-by; Using temporary
SELECT SQL_BUFFER_RESULT MIN(a), b FROM t1 WHERE t1.b = 'a' GROUP BY b;
MIN(a) b
@@ -2460,7 +2473,7 @@ MIN(a) b
EXPLAIN SELECT MIN(a), b FROM t1 WHERE t1.b = 'a' GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 9 NULL 2 Using where; Using index for group-by
+1 SIMPLE t1 range b b 9 NULL 1 Using where; Using index for group-by
SELECT MIN(a), b FROM t1 WHERE t1.b = 'a' GROUP BY b;
MIN(a) b
@@ -2989,3 +3002,34 @@ drop table t20, t21, t22;
#
# End of 10.3 tests
#
+#
+# Test new group_min_max optimization
+#
+create table t1 (a int, b int, c int, key(a,b,c));
+insert into t1 select mod(seq,23),mod(seq,13), mod(seq,5) from seq_1_to_10000;
+explain select a from t1 where a in (1,2,3) group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index for group-by
+explain select a from t1 where a in (1,2,3) or a = 22 group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index for group-by
+explain select a from t1 where a in (1,2,3) and a < 3 group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index for group-by
+explain select a,b from t1 where (a) in (1,2,3) and b in (5,6,7) group by a,b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 9 Using where; Using index for group-by
+explain select a,b from t1 where (a,b) in ((1,1),(2,2),(3,3)) group by a,b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 3 Using where; Using index for group-by
+explain select a,b,c from t1 where (a,b) in ((1,1),(2,2),(3,3)) and c=3 group by a,b,c;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 15 NULL 3 Using where; Using index for group-by
+# Will not use index for group-by
+explain select a from t1 where a in (1,2,3) and b>1 group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 983 Using where; Using index
+explain select a from t1 where a in (1,2,3) and c=1 group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 1161 Using where; Using index
+drop table t1;
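The new group_by.result hunk above exercises when the loose index scan ("Using index for group-by") remains applicable: equality/IN restrictions on a prefix of the GROUP BY key keep it, while a range on a later key part (b>1) or a condition on a non-adjacent part (c=1) forces a regular index range/scan. A small standalone sketch of the same contrast, assuming the sequence engine (seq_1_to_10000) is available as in the test; the table name t_demo is only illustrative:

create table t_demo (a int, b int, c int, key(a,b,c));
insert into t_demo select mod(seq,23), mod(seq,13), mod(seq,5) from seq_1_to_10000;
# Eligible: only leading key parts are restricted by equalities/IN lists
explain select a from t_demo where a in (1,2,3) group by a;
# Not eligible: range condition on a key part that is not grouped on
explain select a from t_demo where a in (1,2,3) and b > 1 group by a;
drop table t_demo;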
diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test
index 357959d4071..3aa7f1cd0d3 100644
--- a/mysql-test/main/group_by.test
+++ b/mysql-test/main/group_by.test
@@ -1,3 +1,5 @@
+--source include/have_sequence.inc
+
# Initialise
--disable_warnings
drop table if exists t1,t2,t3;
@@ -716,7 +718,8 @@ drop table t1, t2;
CREATE TABLE t1 (a tinyint(3), b varchar(255), PRIMARY KEY (a));
INSERT INTO t1 VALUES (1,'-----'), (6,'Allemagne'), (17,'Autriche'),
- (25,'Belgique'), (54,'Danemark'), (62,'Espagne'), (68,'France');
+ (25,'Belgique'), (54,'Danemark'), (62,'Espagne'), (68,'France'),
+ (100,"No land"), (101,"No land");
CREATE TABLE t2 (a tinyint(3), b tinyint(3), PRIMARY KEY (a), KEY b (b));
@@ -1164,6 +1167,13 @@ SELECT a from t2 GROUP BY a;
EXPLAIN SELECT b from t2 GROUP BY b;
SELECT b from t2 GROUP BY b;
+# Show that we are using 'range' when there are more NULL rows in the table

+insert into t2 SELECT NULL, NULL from seq_1_to_10;
+EXPLAIN SELECT b from t2 GROUP BY a;
+--echo # Expect: Using index for group-by
+analyze table t2;
+EXPLAIN SELECT b from t2 GROUP BY a;
+
DROP TABLE t1;
DROP TABLE t2;
@@ -2124,3 +2134,22 @@ drop table t20, t21, t22;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # Test new group_min_max optimization
+--echo #
+
+create table t1 (a int, b int, c int, key(a,b,c));
+insert into t1 select mod(seq,23),mod(seq,13), mod(seq,5) from seq_1_to_10000;
+
+explain select a from t1 where a in (1,2,3) group by a;
+explain select a from t1 where a in (1,2,3) or a = 22 group by a;
+explain select a from t1 where a in (1,2,3) and a < 3 group by a;
+explain select a,b from t1 where (a) in (1,2,3) and b in (5,6,7) group by a,b;
+explain select a,b from t1 where (a,b) in ((1,1),(2,2),(3,3)) group by a,b;
+explain select a,b,c from t1 where (a,b) in ((1,1),(2,2),(3,3)) and c=3 group by a,b,c;
+
+--echo # Will not use index for group-by
+explain select a from t1 where a in (1,2,3) and b>1 group by a;
+explain select a from t1 where a in (1,2,3) and c=1 group by a;
+drop table t1;
diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result
index 2b8b10b29b9..a227246e7ec 100644
--- a/mysql-test/main/group_min_max.result
+++ b/mysql-test/main/group_min_max.result
@@ -2230,10 +2230,10 @@ a
BB
EXPLAIN SELECT a FROM t1 WHERE a='AA' GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref PRIMARY PRIMARY 7 const 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 7 NULL 1 Using where; Using index for group-by
EXPLAIN SELECT a FROM t1 WHERE a='BB' GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref PRIMARY PRIMARY 7 const 1 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 7 NULL 1 Using where; Using index for group-by
SELECT DISTINCT a FROM t1 WHERE a='BB';
a
BB
@@ -2353,6 +2353,9 @@ t1;
id2 id3 id5 id4 id3 id6 id5 id1
1 1 1 1 1 1 1 1
DROP TABLE t1,t2,t3,t4,t5,t6;
+#
+# Bug#22342: No results returned for query using max and group by
+#
CREATE TABLE t1 (a int, b int, KEY (a,b), KEY b (b));
INSERT INTO t1 VALUES
(1,1),(1,2),(1,0),(1,3),
@@ -2361,20 +2364,31 @@ ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+CREATE TABLE t2 (a int, b int, c int, PRIMARY KEY (a,b,c));
+INSERT INTO t2 SELECT a,b,b FROM t1;
explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a,b a 10 NULL 2 Using where; Using index for group-by
+1 SIMPLE t1 range a,b a 10 NULL 1 Using where; Using index for group-by
+insert into t1 select 1,seq from seq_1_to_100;
+explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a,b a 10 NULL 1 Using where; Using index for group-by
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a,b a 10 NULL 1 Using where; Using index for group-by
SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
MAX(b) a
1 1
SELECT MIN(b), a FROM t1 WHERE b > 1 AND a = 1 GROUP BY a;
MIN(b) a
2 1
-CREATE TABLE t2 (a int, b int, c int, PRIMARY KEY (a,b,c));
-INSERT INTO t2 SELECT a,b,b FROM t1;
explain SELECT MIN(c) FROM t2 WHERE b = 2 and a = 1 and c > 1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range PRIMARY PRIMARY 12 NULL 1 Using where; Using index
+1 SIMPLE t2 range PRIMARY PRIMARY 12 NULL 1 Using where; Using index for group-by
SELECT MIN(c) FROM t2 WHERE b = 2 and a = 1 and c > 1 GROUP BY a;
MIN(c)
2
@@ -2446,7 +2460,7 @@ EXPLAIN SELECT (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) x
FROM t1 AS t1_outer;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer index NULL a 10 NULL 15 Using index
-2 SUBQUERY t1 range a a 5 NULL 5 Using where; Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE EXISTS
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
@@ -2456,31 +2470,31 @@ EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t1 range a a 5 NULL 5 Using where; Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
a IN (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1_outer index a a 10 NULL 15 Using where; Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1_outer.a 1
-2 MATERIALIZED t1 range a a 5 NULL 5 Using where; Using index
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 3 Using index
+2 MATERIALIZED t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer GROUP BY a HAVING
a > (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer range NULL a 5 NULL 6 Using index for group-by
-2 SUBQUERY t1 range a a 5 NULL 5 Using where; Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer1 JOIN t1 AS t1_outer2
ON t1_outer1.a = (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2)
AND t1_outer1.b = t1_outer2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer1 ref a a 5 const 1 Using where; Using index
1 PRIMARY t1_outer2 index NULL a 10 NULL 15 Using where; Using index; Using join buffer (flat, BNL join)
-2 SUBQUERY t1 range a a 5 NULL 5 Using where; Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT (SELECT (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) x
FROM t1 AS t1_outer) x2 FROM t1 AS t1_outer2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer2 index NULL a 10 NULL 15 Using index
2 SUBQUERY t1_outer index NULL a 10 NULL 15 Using index
-3 SUBQUERY t1 range a a 5 NULL 5 Using where; Using index
+3 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
CREATE TABLE t3 LIKE t1;
FLUSH STATUS;
INSERT INTO t3 SELECT a,MAX(b) FROM t1 GROUP BY a;
@@ -2664,7 +2678,7 @@ a b
3 13
explain extended select sql_buffer_result a, max(b)+1 from t1 where a = 0 group by a;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range a,index a 5 NULL 3 100.00 Using where; Using index for group-by; Using temporary
+1 SIMPLE t1 range a,index a 5 NULL 1 100.00 Using where; Using index for group-by; Using temporary
Warnings:
Note 1003 select sql_buffer_result `test`.`t1`.`a` AS `a`,max(`test`.`t1`.`b`) + 1 AS `max(b)+1` from `test`.`t1` where `test`.`t1`.`a` = 0 group by `test`.`t1`.`a`
drop table t1;
@@ -3565,7 +3579,7 @@ a c COUNT(DISTINCT c, a, b)
EXPLAIN SELECT COUNT(DISTINCT c, a, b) FROM t2
WHERE a > 5 AND b BETWEEN 10 AND 20 GROUP BY a, b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 1 Using where; Using index
+1 SIMPLE t2 range a a 15 NULL 1 Using where; Using index for group-by
SELECT COUNT(DISTINCT c, a, b) FROM t2
WHERE a > 5 AND b BETWEEN 10 AND 20 GROUP BY a, b, c;
COUNT(DISTINCT c, a, b)
@@ -3657,14 +3671,34 @@ KEY (f1,f2)
) ;
insert into t1 values(1,'A'),(1 , 'B'), (1, 'C'), (2, 'A'),
(3, 'A'), (3, 'B'), (3, 'C'), (3, 'D');
+explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL f1 5 NULL 8 Using index for group-by
SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
f1 COUNT(DISTINCT f2)
1 3
2 1
3 4
+insert into t1 select seq/10,char(64+mod(seq,4)) from seq_1_to_100;
+explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL f1 5 NULL 10 Using index for group-by
+SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+f1 COUNT(DISTINCT f2)
+0 4
+1 4
+2 4
+3 5
+4 4
+5 4
+6 4
+7 4
+8 4
+9 4
+10 4
explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL f1 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL f1 5 NULL 10 Using index for group-by
drop table t1;
# End of test#50539.
#
@@ -4092,7 +4126,7 @@ CREATE TABLE t1 (p int NOT NULL, a int NOT NULL, PRIMARY KEY (p,a));
insert into t1 select 2,seq from seq_0_to_1000;
EXPLAIN select MIN(a) from t1 where p = 2 group by p;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 10 Using where; Using index for group-by
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using index for group-by
SELECT MIN(a) from t1 where p = 2 group by p;
MIN(a)
0
diff --git a/mysql-test/main/group_min_max.test b/mysql-test/main/group_min_max.test
index 0fa91e4d72b..d230cd15749 100644
--- a/mysql-test/main/group_min_max.test
+++ b/mysql-test/main/group_min_max.test
@@ -899,23 +899,27 @@ t1;
DROP TABLE t1,t2,t3,t4,t5,t6;
-#
-# Bug#22342: No results returned for query using max and group by
-#
+--echo #
+--echo # Bug#22342: No results returned for query using max and group by
+--echo #
CREATE TABLE t1 (a int, b int, KEY (a,b), KEY b (b));
INSERT INTO t1 VALUES
(1,1),(1,2),(1,0),(1,3),
(1,-1),(1,-2),(1,-3),(1,-4);
ANALYZE TABLE t1;
+CREATE TABLE t2 (a int, b int, c int, PRIMARY KEY (a,b,c));
+INSERT INTO t2 SELECT a,b,b FROM t1;
explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
+insert into t1 select 1,seq from seq_1_to_100;
+explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
+analyze table t1;
+explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
+
SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
SELECT MIN(b), a FROM t1 WHERE b > 1 AND a = 1 GROUP BY a;
-CREATE TABLE t2 (a int, b int, c int, PRIMARY KEY (a,b,c));
-INSERT INTO t2 SELECT a,b,b FROM t1;
explain SELECT MIN(c) FROM t2 WHERE b = 2 and a = 1 and c > 1 GROUP BY a;
SELECT MIN(c) FROM t2 WHERE b = 2 and a = 1 and c > 1 GROUP BY a;
-
DROP TABLE t1,t2;
#
@@ -1388,6 +1392,7 @@ SELECT a, c, COUNT(DISTINCT c, a, b) FROM t2 GROUP BY a, b, c;
EXPLAIN SELECT COUNT(DISTINCT c, a, b) FROM t2
WHERE a > 5 AND b BETWEEN 10 AND 20 GROUP BY a, b, c;
+
SELECT COUNT(DISTINCT c, a, b) FROM t2
WHERE a > 5 AND b BETWEEN 10 AND 20 GROUP BY a, b, c;
@@ -1439,10 +1444,12 @@ CREATE TABLE t1 (
) ;
insert into t1 values(1,'A'),(1 , 'B'), (1, 'C'), (2, 'A'),
(3, 'A'), (3, 'B'), (3, 'C'), (3, 'D');
-
+explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
+insert into t1 select seq/10,char(64+mod(seq,4)) from seq_1_to_100;
+explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
explain SELECT f1, COUNT(DISTINCT f2) FROM t1 GROUP BY f1;
-
drop table t1;
--echo # End of test#50539.
diff --git a/mysql-test/main/group_min_max_innodb.result b/mysql-test/main/group_min_max_innodb.result
index 3586ad5237f..2b42ac1cbe1 100644
--- a/mysql-test/main/group_min_max_innodb.result
+++ b/mysql-test/main/group_min_max_innodb.result
@@ -73,10 +73,10 @@ insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d");
alter table t1 drop primary key, add primary key (f2, f1);
explain select distinct f1 a, f1 b from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index; Using temporary
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using temporary
explain select distinct f1, f2 from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
drop table t1;
create table t1(pk int primary key) engine=innodb;
create view v1 as select pk from t1 where pk < 20;
@@ -108,7 +108,7 @@ CREATE TABLE t1 (a CHAR(1), b CHAR(1), PRIMARY KEY (a,b)) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('a', 'b'), ('c', 'd');
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1 WHERE b = 'b';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 2 NULL 2 Using where; Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(DISTINCT a) FROM t1 WHERE b = 'b';
COUNT(DISTINCT a)
1
@@ -118,7 +118,7 @@ ENGINE=InnoDB;
INSERT INTO t1 VALUES ('a', 'b'), ('c', 'd');
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1 WHERE b = 'b';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 2 NULL 2 Using where; Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(DISTINCT a) FROM t1 WHERE b = 'b';
COUNT(DISTINCT a)
1
@@ -162,7 +162,7 @@ ANALYZE TABLE t2;
EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F')
GROUP BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range k1 k1 5 NULL 31 Using where; Using index
+1 SIMPLE t1 range k1 k1 5 NULL 31 Using where
SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F')
GROUP BY c1;
c1 max(i2)
@@ -171,7 +171,7 @@ F 30
EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17))
GROUP BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range k1 k1 5 NULL 31 Using where; Using index
+1 SIMPLE t1 range k1 k1 5 NULL 31 Using where
SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17))
GROUP BY c1;
c1 max(i2)
@@ -180,7 +180,7 @@ F 17
EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 )
GROUP BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range k1 k1 5 NULL 1 Using where; Using index for group-by
+1 SIMPLE t1 range k1 k1 5 NULL 2 Using where
SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 )
GROUP BY c1;
c1 max(i2)
@@ -190,7 +190,7 @@ EXPLAIN SELECT c1, max(i2) FROM t1
WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 )))
GROUP BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range k1 k1 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range k1 k1 5 NULL 3 Using where
SELECT c1, max(i2) FROM t1
WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 )))
GROUP BY c1;
@@ -200,7 +200,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2
WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )
GROUP BY c1,i1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k2 k2 9 NULL 60 Using where; Using index for group-by
+1 SIMPLE t2 range k2 k2 5 NULL 60 Using where
SELECT c1, i1, max(i2) FROM t2
WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )
GROUP BY c1,i1;
@@ -211,7 +211,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2
WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ))
GROUP BY c1,i1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k2 k2 9 NULL 60 Using where; Using index for group-by
+1 SIMPLE t2 range k2 k2 5 NULL 60 Using where
SELECT c1, i1, max(i2) FROM t2
WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ))
GROUP BY c1,i1;
@@ -222,7 +222,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2
WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 ))
GROUP BY c1,i1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index k2 k2 9 NULL 180 Using where; Using index
+1 SIMPLE t2 index k2 k2 9 NULL 180 Using where
SELECT c1, i1, max(i2) FROM t2
WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 ))
GROUP BY c1,i1;
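The having_cond_pushdown.result hunks that follow replace literal cost figures with the placeholder "COST_REPLACED" and add the new loops/cost members to EXPLAIN FORMAT=JSON output. A plausible sketch, not taken from the patch, of how such volatile numbers are typically masked in a mysqltest file (the suite's actual helper include may differ), shown against the t1 table used by that test:

--replace_regex /"cost": [0-9e.+-]+/"cost": "COST_REPLACED"/
explain format=json select a, max(b) from t1 where a > 1 group by a having a > 2;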
diff --git a/mysql-test/main/having_cond_pushdown.result b/mysql-test/main/having_cond_pushdown.result
index 59388aa7ca0..e1f49b075b7 100644
--- a/mysql-test/main/having_cond_pushdown.result
+++ b/mysql-test/main/having_cond_pushdown.result
@@ -34,6 +34,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -42,7 +43,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2"
}
@@ -60,6 +63,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -68,7 +72,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2"
}
@@ -105,12 +111,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -126,12 +135,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -168,6 +180,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -176,7 +189,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a < 4"
}
@@ -194,6 +209,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -202,7 +218,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a < 4"
}
@@ -243,6 +261,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -251,7 +270,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 or t1.a = 3"
}
@@ -271,6 +292,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -279,7 +301,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 or t1.a = 3"
}
@@ -319,6 +343,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a > 2 and max(t1.b) > 13 or t1.a < 3 and min(t1.c) > 1",
"filesort": {
"sort_key": "t1.a",
@@ -328,7 +353,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 or t1.a < 3"
}
@@ -347,6 +374,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a > 2 and max(t1.b) > 13 or t1.a < 3 and min(t1.c) > 1",
"filesort": {
"sort_key": "t1.a",
@@ -356,7 +384,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 or t1.a < 3"
}
@@ -393,6 +423,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.a) < 3",
"filesort": {
"sort_key": "t1.a",
@@ -402,7 +433,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -421,6 +454,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.a) < 3",
"filesort": {
"sort_key": "t1.a",
@@ -430,7 +464,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -466,6 +502,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) > 13",
"filesort": {
"sort_key": "t1.a",
@@ -475,7 +512,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -494,6 +533,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) > 13",
"filesort": {
"sort_key": "t1.a",
@@ -503,7 +543,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -539,13 +581,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.a) = 3",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 3"
}
@@ -562,13 +607,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.a) = 3",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 3"
}
@@ -602,13 +650,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) > 12",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -625,13 +676,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) > 12",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -665,6 +719,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) = 13",
"filesort": {
"sort_key": "t1.a",
@@ -674,7 +729,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -693,6 +750,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) = 13",
"filesort": {
"sort_key": "t1.a",
@@ -702,7 +760,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -740,6 +800,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "min(t1.c) < 3",
"filesort": {
"sort_key": "t1.a",
@@ -749,7 +810,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -768,6 +831,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "min(t1.c) < 3",
"filesort": {
"sort_key": "t1.a",
@@ -777,7 +841,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -813,13 +879,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) = 13 and min(t1.c) = 2",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -836,13 +905,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) = 13 and min(t1.c) = 2",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2"
}
@@ -877,6 +949,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a = test.f1()",
"filesort": {
"sort_key": "t1.a",
@@ -886,7 +959,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -905,6 +980,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a = test.f1()",
"filesort": {
"sort_key": "t1.a",
@@ -914,7 +990,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -948,7 +1026,7 @@ GROUP BY v1.a
HAVING (v1.a>1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Using temporary; Using filesort
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.x 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.x 1
2 DERIVED t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
explain format=json SELECT v1.a
FROM t2,v1
@@ -959,6 +1037,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a",
"temporary_table": {
@@ -967,7 +1046,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x > 1 and t2.x is not null"
}
@@ -981,11 +1062,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.x"],
- "rows": 2,
+ "loops": 4,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -994,7 +1078,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -1019,6 +1105,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.a",
"temporary_table": {
@@ -1027,7 +1114,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x > 1 and t2.x is not null"
}
@@ -1041,11 +1130,14 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.x"],
- "rows": 2,
+ "loops": 4,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1054,7 +1146,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -1095,7 +1189,7 @@ GROUP BY v1.c
HAVING (v1.c>2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Using temporary; Using filesort
-1 PRIMARY <derived2> ref key0 key0 5 test.t2.x 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.x 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
explain format=json SELECT v1.a,v1.c
FROM t2,v1
@@ -1106,6 +1200,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.c",
"temporary_table": {
@@ -1114,7 +1209,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x is not null"
}
@@ -1128,12 +1225,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.x"],
- "rows": 2,
+ "loops": 4,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 2",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c > 2",
"filesort": {
"sort_key": "t1.a",
@@ -1143,7 +1243,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1167,6 +1269,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "v1.c",
"temporary_table": {
@@ -1175,7 +1278,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x is not null"
}
@@ -1189,12 +1294,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.x"],
- "rows": 2,
+ "loops": 4,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v1.c > 2",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c > 2",
"filesort": {
"sort_key": "t1.a",
@@ -1204,7 +1312,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1256,6 +1366,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1264,7 +1375,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null"
}
@@ -1278,19 +1391,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["x", "MAX(t2.y)"],
"ref": ["test.t1.a", "test.t1.b"],
+ "loops": 5,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x < 5 and t2.x > 1"
}
@@ -1315,6 +1433,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1323,7 +1442,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null"
}
@@ -1337,19 +1458,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["x", "MAX(t2.y)"],
"ref": ["test.t1.a", "test.t1.b"],
+ "loops": 5,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x < 5 and t2.x > 1"
}
@@ -1399,6 +1525,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
@@ -1407,7 +1534,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b < 14 and t1.a is not null and t1.b is not null"
}
@@ -1421,12 +1550,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["x", "MAX(t2.y)"],
"ref": ["test.t1.a", "test.t1.b"],
+ "loops": 5,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.y)` < 14",
"temporary_table": {
"nested_loop": [
@@ -1434,7 +1566,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x < 5"
}
@@ -1459,6 +1593,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.b",
"temporary_table": {
@@ -1467,7 +1602,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b < 14 and t1.a is not null and t1.b is not null"
}
@@ -1481,12 +1618,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["x", "MAX(t2.y)"],
"ref": ["test.t1.a", "test.t1.b"],
+ "loops": 5,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.y)` < 14",
"temporary_table": {
"nested_loop": [
@@ -1494,7 +1634,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.x < 5"
}
@@ -1538,6 +1680,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c = 2",
"filesort": {
"sort_key": "t1.a",
@@ -1547,7 +1690,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -1565,6 +1710,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c = 2",
"filesort": {
"sort_key": "t1.a",
@@ -1574,7 +1720,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1"
}
@@ -1610,13 +1758,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a = 2 and t1.c = 2",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 13"
}
@@ -1633,13 +1784,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a = 2 and t1.c = 2",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 13"
}
@@ -1674,6 +1828,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1682,7 +1837,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a > 1"
}
@@ -1699,6 +1856,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1707,7 +1865,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a > 1"
}
@@ -1743,12 +1903,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2 and t1.c = 2"
}
@@ -1764,12 +1927,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2 and t1.c = 2"
}
@@ -1803,6 +1969,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3 and t1.c > 3",
"filesort": {
"sort_key": "t1.a",
@@ -1812,7 +1979,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3"
}
@@ -1831,6 +2000,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3 and t1.c > 3",
"filesort": {
"sort_key": "t1.a",
@@ -1840,7 +2010,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3"
}
@@ -1883,6 +2055,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1891,7 +2064,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a < 3"
}
@@ -1909,6 +2084,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1917,7 +2093,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a < 3"
}
@@ -1958,6 +2136,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1966,7 +2145,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a > 1 and t1.a < 3"
}
@@ -1984,6 +2165,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -1992,7 +2174,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and t1.a > 1 and t1.a < 3"
}
@@ -2032,6 +2216,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
"filesort": {
"sort_key": "t1.a",
@@ -2041,7 +2226,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4) and t1.a < 2"
}
@@ -2060,6 +2247,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
"filesort": {
"sort_key": "t1.a",
@@ -2069,7 +2257,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4) and t1.a < 2"
}
@@ -2112,6 +2302,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
"filesort": {
"sort_key": "t1.a",
@@ -2121,7 +2312,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4)"
}
@@ -2140,6 +2333,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
"filesort": {
"sort_key": "t1.a",
@@ -2149,7 +2343,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4)"
}
@@ -2208,6 +2404,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.b = 13 and max(t1.c) > 2",
"filesort": {
"sort_key": "t1.a",
@@ -2217,7 +2414,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2234,6 +2433,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.b = 13 and max(t1.c) > 2",
"filesort": {
"sort_key": "t1.a",
@@ -2243,7 +2443,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2281,6 +2483,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2289,7 +2492,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.b > 10) and t1.b < 14"
}
@@ -2307,6 +2512,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2315,7 +2521,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.b > 10) and t1.b < 14"
}
@@ -2355,6 +2563,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2363,7 +2572,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.b > 15)"
}
@@ -2381,6 +2592,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2389,7 +2601,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.b > 15)"
}
@@ -2428,6 +2642,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2436,7 +2651,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.a = 2 and t1.b > 15)"
}
@@ -2454,6 +2671,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2462,7 +2680,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.a = 2 and t1.b > 15)"
}
@@ -2503,6 +2723,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2511,7 +2732,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.b = 13 and t1.a > 2"
}
@@ -2529,6 +2752,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2537,7 +2761,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.b = 13 and t1.a > 2"
}
@@ -2575,6 +2801,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2583,7 +2810,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a < 2 or t1.b = 13 and t1.a > 2"
}
@@ -2601,6 +2830,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2609,7 +2839,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a < 2 or t1.b = 13 and t1.a > 2"
}
@@ -2649,6 +2881,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2657,7 +2890,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a < 2 or t1.a > 2 and (t1.b = 13 or t1.b = 14)"
}
@@ -2675,6 +2910,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -2683,7 +2919,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a < 2 or t1.a > 2 and (t1.b = 13 or t1.b = 14)"
}
@@ -2719,6 +2957,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a < 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
"filesort": {
"sort_key": "t1.a",
@@ -2728,7 +2967,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.a = 1 or t1.a = 2"
}
@@ -2747,6 +2988,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a < 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
"filesort": {
"sort_key": "t1.a",
@@ -2756,7 +2998,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 or t1.a = 1 or t1.a = 2"
}
@@ -2794,6 +3038,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a = 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
"filesort": {
"sort_key": "t1.a",
@@ -2803,7 +3048,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2 or t1.a = 1 or t1.a = 2"
}
@@ -2822,6 +3069,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.a = 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
"filesort": {
"sort_key": "t1.a",
@@ -2831,7 +3079,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 2 or t1.a = 1 or t1.a = 2"
}
@@ -2868,13 +3118,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.c) = 3",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -2891,13 +3144,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.c) = 3",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -2932,13 +3188,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b = 14)",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 3"
}
@@ -2955,13 +3214,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) = 14",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 3"
}
@@ -2995,13 +3257,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b = 14)",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -3018,13 +3283,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "max(t1.b) = 14",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -3068,6 +3336,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3076,7 +3345,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 2 and t1.a < 3"
}
@@ -3094,6 +3365,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3102,7 +3374,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 2 and t1.a < 3"
}
@@ -3143,12 +3417,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 3 and t1.b > 2"
}
@@ -3164,12 +3441,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 3 and t1.b > 2"
}
@@ -3208,6 +3488,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3216,7 +3497,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a < 3"
}
@@ -3234,6 +3517,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3242,7 +3526,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a < 3"
}
@@ -3283,12 +3569,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.b = 14"
}
@@ -3304,12 +3593,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = 14 and t1.a = 1"
}
@@ -3395,12 +3687,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -3416,12 +3711,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = 1 and t1.a = 1"
}
@@ -3462,6 +3760,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3470,7 +3769,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c > 0 and t1.c < 3 and t1.a > 1"
}
@@ -3488,6 +3789,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3496,7 +3798,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c > 0 and t1.c < 3 and t1.a > 1"
}
@@ -3537,12 +3841,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c > 0 and t1.c < 3"
}
@@ -3558,12 +3865,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c > 0 and t1.c < 3"
}
@@ -3602,13 +3912,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c < 3"
}
@@ -3624,12 +3937,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c < 3"
}
@@ -3713,12 +4029,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.b = 2 and t3.d = 1 and t3.a = 1"
}
@@ -3734,12 +4053,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.b = 2 and t3.d = 1 and t3.a = 1"
}
@@ -3778,6 +4100,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3786,7 +4109,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 2"
}
@@ -3804,6 +4129,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3812,7 +4138,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 2"
}
@@ -3901,6 +4229,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3909,7 +4238,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a < 4 and t1.a > 0"
}
@@ -3927,6 +4258,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -3935,7 +4267,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a < 4 and t1.a > 0"
}
@@ -3976,13 +4310,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1 and 1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -3998,12 +4335,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -4044,6 +4384,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -4052,7 +4393,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and (t1.a < 4 or t1.a > 0)"
}
@@ -4070,6 +4413,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -4078,7 +4422,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and (t1.a < 4 or t1.a > 0)"
}
@@ -4119,13 +4465,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -4141,12 +4490,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1"
}
@@ -4187,6 +4539,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c > 1",
"filesort": {
"sort_key": "t1.a",
@@ -4196,7 +4549,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.c < 3 and t1.a < 4"
}
@@ -4216,6 +4571,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c > 1",
"filesort": {
"sort_key": "t1.a",
@@ -4225,7 +4581,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.c < 3 and t1.a < 4"
}
@@ -4263,6 +4621,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"filesort": {
"sort_key": "t1.c",
@@ -4272,7 +4631,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c < 3 and t1.c > 1"
}
@@ -4291,6 +4652,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.c",
"temporary_table": {
@@ -4299,7 +4661,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c < 3 and t1.c > 1"
}
@@ -4339,13 +4703,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1 and 1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 3"
}
@@ -4362,12 +4729,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 3"
}
@@ -4405,13 +4775,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t3.d > 0",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
}
@@ -4429,13 +4802,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t3.d > 0",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
}
@@ -4473,13 +4849,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
}
@@ -4496,12 +4875,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
}
@@ -4542,6 +4924,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c > 1",
"filesort": {
"sort_key": "t1.a",
@@ -4551,7 +4934,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4"
}
@@ -4571,6 +4956,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "t1.c > 1",
"filesort": {
"sort_key": "t1.a",
@@ -4580,7 +4966,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4"
}
@@ -4624,6 +5012,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.c",
"temporary_table": {
@@ -4632,7 +5021,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4 and t1.c > 1"
}
@@ -4651,6 +5042,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.c",
"temporary_table": {
@@ -4659,7 +5051,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4 and t1.c > 1"
}
@@ -4703,6 +5097,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.c",
"temporary_table": {
@@ -4711,7 +5106,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.a = 3) and (t1.a = 4 or t1.c > 1)"
}
@@ -4730,6 +5127,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.c",
"temporary_table": {
@@ -4738,7 +5136,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 1 or t1.a = 3) and (t1.a = 4 or t1.c > 1)"
}
@@ -4779,12 +5179,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -4800,12 +5203,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -4844,12 +5250,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -4866,12 +5275,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.c = 1"
}
@@ -5051,6 +5463,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t3.a",
"temporary_table": {
@@ -5059,7 +5472,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.a = 1 or t3.a > 1)"
}
@@ -5078,6 +5493,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t3.a",
"temporary_table": {
@@ -5086,7 +5502,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.a = 1 or t3.a > 1)"
}
@@ -5125,6 +5543,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t3.a",
"temporary_table": {
@@ -5133,7 +5552,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.c = t3.a and t3.c < 15 or t3.a > 1)"
}
@@ -5152,6 +5573,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t3.a",
"temporary_table": {
@@ -5160,7 +5582,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.c = t3.a and t3.a < 15 or t3.a > 1)"
}
@@ -5240,6 +5664,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.d1",
"temporary_table": {
@@ -5248,7 +5673,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.d1 between <cache>(inet_aton('1978-04-27')) and <cache>('2018-08-26')"
}
@@ -5282,6 +5709,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.d1",
"temporary_table": {
@@ -5290,7 +5718,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.d1 not between <cache>(0) and <cache>(exp(0))"
}
@@ -5357,6 +5787,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -5365,7 +5796,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8 or t1.a = (subquery#2)"
}
@@ -5375,12 +5808,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -5401,6 +5837,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -5409,7 +5846,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a = 8 or t1.a = (subquery#2)) and t1.b < 20"
}
@@ -5419,12 +5858,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -5445,6 +5887,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "sum(t1.b) > 20",
"filesort": {
"sort_key": "t1.a",
@@ -5454,7 +5897,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 8 or t1.a = (subquery#2)"
}
@@ -5464,12 +5909,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -5490,12 +5938,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = (subquery#2)"
}
@@ -5505,12 +5956,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
diff --git a/mysql-test/main/having_cond_pushdown.test b/mysql-test/main/having_cond_pushdown.test
index 99e4a597f22..be18699519e 100644
--- a/mysql-test/main/having_cond_pushdown.test
+++ b/mysql-test/main/having_cond_pushdown.test
@@ -23,12 +23,14 @@ HAVING (t1.a>2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
FROM t1
WHERE (t1.a>2)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : using equality
@@ -40,12 +42,14 @@ HAVING (t1.a=2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
FROM t1
WHERE (t1.a=2)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted AND formula
@@ -57,12 +61,14 @@ HAVING (t1.a>1) AND (t1.a<4);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
FROM t1
WHERE (t1.a>1) AND (t1.a<4)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted OR formula
@@ -75,6 +81,7 @@ eval $no_pushdown $query;
eval $query;
--enable_prepare_warnings
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
--disable_prepare_warnings
let $query=
@@ -83,6 +90,7 @@ FROM t1
WHERE (t1.a>1) OR (a IN (SELECT 3))
GROUP BY t1.a;
--enable_prepare_warnings
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--disable_prepare_warnings
@@ -94,6 +102,7 @@ HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -101,6 +110,7 @@ FROM t1
WHERE (t1.a>2) OR (t1.a<3)
GROUP BY t1.a
HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : no aggregation formula pushdown
@@ -112,6 +122,7 @@ HAVING (t1.a>1) AND (MAX(t1.a)<3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
@@ -119,6 +130,7 @@ FROM t1
WHERE (t1.a>1)
GROUP BY t1.a
HAVING (MAX(t1.a)<3);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -129,6 +141,7 @@ HAVING (t1.a>1) AND (MAX(t1.b)>13);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
@@ -136,6 +149,7 @@ FROM t1
WHERE (t1.a>1)
GROUP BY t1.a
HAVING (MAX(t1.b)>13);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -146,6 +160,7 @@ HAVING (t1.a=3) AND (MAX(t1.a)=3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
@@ -153,6 +168,7 @@ FROM t1
WHERE (t1.a=3)
GROUP BY t1.a
HAVING (MAX(t1.a)=3);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -163,6 +179,7 @@ HAVING (t1.a=2) AND (MAX(t1.b)>12);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
@@ -170,6 +187,7 @@ FROM t1
WHERE (t1.a=2)
GROUP BY t1.a
HAVING (MAX(t1.b)>12);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -180,6 +198,7 @@ HAVING (t1.a>1) AND (MAX(t1.b)=13);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
@@ -187,6 +206,7 @@ FROM t1
WHERE (t1.a>1)
GROUP BY t1.a
HAVING (MAX(t1.b)=13);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -197,6 +217,7 @@ HAVING (t1.a>1) AND (MIN(t1.c)<3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MIN(t1.c)
@@ -204,6 +225,7 @@ FROM t1
WHERE (t1.a>1)
GROUP BY t1.a
HAVING (MIN(t1.c)<3);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -214,6 +236,7 @@ HAVING (t1.a=2) AND (MAX(t1.b)=13) AND (MIN(t1.c)=2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MIN(t1.c)
@@ -221,6 +244,7 @@ FROM t1
WHERE (t1.a=2)
GROUP BY t1.a
HAVING (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : no stored function pushdown
@@ -232,6 +256,7 @@ HAVING (t1.a>1) AND (a=test.f1());
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
@@ -239,6 +264,7 @@ FROM t1
WHERE (t1.a>1)
GROUP BY t1.a
HAVING (a=test.f1());
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : pushdown into derived table WHERE clause
@@ -251,12 +277,14 @@ HAVING (v1.a>1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT v1.a
FROM t2,v1
WHERE (t2.x=v1.a) AND (v1.a>1)
GROUP BY v1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : pushdown into derived table HAVING clause
@@ -269,12 +297,14 @@ HAVING (v1.c>2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT v1.a,v1.c
FROM t2,v1
WHERE (t2.x=v1.a) AND (v1.c>2)
GROUP BY v1.c;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : pushdown into materialized IN subquery
@@ -288,6 +318,7 @@ HAVING (t1.a>1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT * FROM t1
@@ -295,6 +326,7 @@ WHERE
(t1.a>1) AND
(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : pushdown into materialized IN subquery
@@ -308,6 +340,7 @@ HAVING (t1.b<14);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT * FROM t1
@@ -315,6 +348,7 @@ WHERE
(t1.b<14) AND
(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
GROUP BY t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # non-standard allowed queries
@@ -327,12 +361,14 @@ HAVING (t1.c=2) AND (t1.a>1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c FROM t1
WHERE (t1.a>1)
GROUP BY t1.a
HAVING (t1.c=2);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -343,6 +379,7 @@ HAVING (t1.a=2) AND (t1.b=13) AND (t1.c=2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT MAX(t1.a),t1.a,t1.b,t1.c
@@ -350,6 +387,7 @@ FROM t1
WHERE (t1.b=13)
GROUP BY t1.b
HAVING (t1.a=2) AND (t1.c=2);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted AND formula : using equalities
@@ -361,11 +399,13 @@ HAVING (t1.a=t1.c) AND (t1.c>1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b) FROM t1
WHERE (t1.a=t1.c) AND (t1.a>1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -376,12 +416,14 @@ HAVING (t1.a=t1.c) AND (t1.c=2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a=t1.c) AND (t1.a=2)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -392,6 +434,7 @@ HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -399,6 +442,7 @@ FROM t1
WHERE ((t1.a=t1.c) AND (t1.a>1)) OR (t1.a<3)
GROUP BY t1.a
HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : pushdown using WHERE multiple equalities
@@ -411,12 +455,14 @@ HAVING (t1.c<3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a=t1.c) AND (t1.c<3)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted AND-formula : pushdown using WHERE multiple equalities
@@ -429,12 +475,14 @@ HAVING (t1.a>1) AND (t1.c<3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a=t1.c) AND (t1.a>1) AND (t1.c<3)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -446,6 +494,7 @@ HAVING (((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4)) AND (t1.a<2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -453,6 +502,7 @@ FROM t1
WHERE (t1.a=t1.c) AND (((t1.a>1) OR (t1.c<4)) AND (t1.a<2))
GROUP BY t1.a
HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted OR-formula : pushdown using WHERE multiple equalities
@@ -465,6 +515,7 @@ HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -472,6 +523,7 @@ FROM t1
WHERE (t1.a=t1.c) AND ((t1.a>1) OR (t1.c<4))
GROUP BY t1.a
HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
DROP TABLE t1,t2;
@@ -510,12 +562,14 @@ HAVING t1.b = 13 AND MAX(t1.c) > 2;
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
GROUP BY t1.a
HAVING t1.b = 13 AND MAX(t1.c) > 2;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted AND formula
@@ -527,12 +581,14 @@ HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14)
GROUP BY t1.a,t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -543,12 +599,14 @@ HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15)
GROUP BY t1.a,t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted AND formula : equality in the inner AND formula
@@ -560,12 +618,14 @@ HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2));
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2))
GROUP BY t1.a,t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # extracted OR formula
@@ -577,12 +637,14 @@ HAVING (t1.a < 2) OR (t1.b = 13 AND t1.a > 2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.a < 2) OR (t1.b = 13 AND t1.a > 2)
GROUP BY t1.a,t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -593,12 +655,14 @@ HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13)
GROUP BY t1.a,t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -609,12 +673,14 @@ HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14));
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14))
GROUP BY t1.a,t1.b;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -625,6 +691,7 @@ HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2)
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
@@ -632,6 +699,7 @@ FROM t1
WHERE (t1.a < 2) OR (t1.a = 1 OR t1.a = 2)
GROUP BY t1.a
HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -642,6 +710,7 @@ HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2)
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
@@ -649,6 +718,7 @@ FROM t1
WHERE (t1.a = 2) OR (t1.a = 1 OR t1.a = 2)
GROUP BY t1.a
HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : equality pushdown
@@ -660,6 +730,7 @@ HAVING (t1.a = 1) AND (MAX(t1.c) = 3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
@@ -667,6 +738,7 @@ FROM t1
WHERE (t1.a = 1)
GROUP BY t1.a
HAVING (MAX(t1.c) = 3);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : equalities pushdown
@@ -678,6 +750,7 @@ HAVING (t1.a = 1) AND (t1.c = 3) AND MAX(t1.b = 14);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
@@ -685,6 +758,7 @@ FROM t1
WHERE (t1.a = 1) AND (t1.c = 3)
GROUP BY t1.a,t1.c
HAVING (MAX(t1.b) = 14);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # conjunctive subformula : multiple equality consists of
@@ -697,6 +771,7 @@ HAVING (t1.a = 1) AND (t1.c = 1) AND MAX(t1.b = 14);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
@@ -704,6 +779,7 @@ FROM t1
WHERE (t1.a = 1) AND (t1.c = 1)
GROUP BY t1.a,t1.c
HAVING (MAX(t1.b) = 14);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo #
@@ -720,12 +796,14 @@ HAVING (t1.a < 3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.b > 2) AND (t1.a < 3)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : inequality in WHERE
@@ -738,12 +816,14 @@ HAVING (t1.a = 3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.b > 2) AND (t1.a = 3)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # inequality : equality in WHERE
@@ -756,12 +836,14 @@ HAVING (t1.a < 3);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.b = 14) AND (t1.a < 3)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : equality in WHERE
@@ -774,12 +856,14 @@ HAVING (t1.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,t1.b,MAX(t1.c)
FROM t1
WHERE (t1.b = 14) AND (t1.a = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : equality in WHERE, impossible WHERE
@@ -792,12 +876,14 @@ HAVING (t1.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.c)
FROM t1
WHERE (t1.a = 3) AND (t1.a = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : equality in WHERE (equal through constant)
@@ -810,12 +896,14 @@ HAVING (t1.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.c = 1) AND (t1.a = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # inequality : AND formula in WHERE
@@ -828,12 +916,14 @@ HAVING (t1.a > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.c > 0) AND (t1.c < 3) AND (t1.a > 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : AND formula in WHERE
@@ -846,12 +936,14 @@ HAVING (t1.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.c > 0) AND (t1.c < 3) AND (t1.a = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : AND formula in WHERE, impossible WHERE
@@ -864,12 +956,14 @@ HAVING (t1.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a > 0) AND (t1.c < 3) AND (t1.a = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -881,12 +975,14 @@ HAVING (t1.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
FROM t1
WHERE (t1.a = 0) AND (t1.a = 3) AND (t1.a = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -898,12 +994,14 @@ HAVING (t3.a = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t3.a,t3.b,MAX(t3.c),t3.d
FROM t3
WHERE (t3.b = 2) AND (t3.d = 1) AND (t3.a = 1)
GROUP BY t3.a,t3.b,t3.d;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # inequality : OR formula in WHERE
@@ -916,12 +1014,14 @@ HAVING (t1.a < 2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE ((t1.a > 1) OR (t1.c < 3)) AND (t1.a < 2)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -933,12 +1033,14 @@ HAVING (t1.a = 2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b)
FROM t1
WHERE ((t1.a = 1) OR (t1.a = 3)) AND (t1.a = 2)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula : inequality in WHERE
@@ -951,12 +1053,14 @@ HAVING (t1.a < 4) AND (t1.a > 0);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a > 1) AND (t1.a < 4) AND (t1.a > 0)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula : equality in WHERE
@@ -969,12 +1073,14 @@ HAVING (t1.a < 4) AND (t1.a > 0);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a = 1) AND (t1.a < 4) AND (t1.a > 0)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # OR formula : inequality in WHERE
@@ -987,12 +1093,14 @@ HAVING (t1.a < 4) OR (t1.a > 0);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a > 1) AND ((t1.a < 4) OR (t1.a > 0))
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # OR formula : equality in WHERE
@@ -1005,12 +1113,14 @@ HAVING (t1.a < 4) OR (t1.a > 0);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a = 1) AND ((t1.a < 4) OR (t1.a > 0))
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula : AND formula in WHERE
@@ -1023,6 +1133,7 @@ HAVING (t1.a < 4) AND (t1.c > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1031,6 +1142,7 @@ WHERE ((t1.a > 1) AND (t1.c < 3)) AND
(t1.a < 4)
GROUP BY t1.a
HAVING (t1.c > 1);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1042,6 +1154,7 @@ HAVING (t1.a < 4) AND (t1.c > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1049,6 +1162,7 @@ FROM t1
WHERE ((t1.a = 1) AND (t1.c < 3)) AND
((t1.a < 4) AND (t1.c > 1))
GROUP BY t1.a,t1.c;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1060,6 +1174,7 @@ HAVING (t1.a < 4) AND (t1.c > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1067,6 +1182,7 @@ FROM t1
WHERE ((t1.a = 1) AND (t1.c = 3)) AND
((t1.a < 4) AND (t1.c > 1))
GROUP BY t1.a,t1.c;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1078,6 +1194,7 @@ HAVING (t3.b = 2) AND (t3.d > 0);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t3.a,t3.b,MAX(t3.c),t3.d
@@ -1086,6 +1203,7 @@ WHERE (t3.a = 1) AND (t3.d = 1) AND
(t3.b = 2)
GROUP BY t3.a,t3.b
HAVING (t3.d > 0);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1097,6 +1215,7 @@ HAVING (t3.b = 2) AND (t3.d > 0);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t3.a,t3.b,MAX(t3.c),t3.d
@@ -1104,6 +1223,7 @@ FROM t3
WHERE (t3.a = 1) AND (t3.d = 1) AND
(t3.b = 2) AND (t3.d > 0)
GROUP BY t3.a,t3.b,t3.d;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula : OR formula in WHERE
@@ -1116,6 +1236,7 @@ HAVING (t1.a < 4) AND (t1.c > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1124,6 +1245,7 @@ WHERE ((t1.a > 1) OR (t1.c < 3)) AND
(t1.a < 4)
GROUP BY t1.a
HAVING (t1.c > 1);
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1135,6 +1257,7 @@ HAVING (t1.a < 4) AND (t1.c > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1142,6 +1265,7 @@ FROM t1
WHERE ((t1.a > 1) OR (t1.c < 3)) AND
(t1.a < 4) AND (t1.c > 1)
GROUP BY t1.a,t1.c;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1153,6 +1277,7 @@ HAVING (t1.a = 4) OR (t1.c > 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1160,6 +1285,7 @@ FROM t1
WHERE ((t1.a = 1) OR (t1.a = 3)) AND
((t1.a = 4) OR (t1.c > 1))
GROUP BY t1.a,t1.c;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # equality : pushdown through equality in WHERE
@@ -1172,12 +1298,14 @@ HAVING (t1.c = 1);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
FROM t1
WHERE (t1.a = 1) AND (t1.a = t1.c) AND (t1.c = 1)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # OR formula : pushdown through equality
@@ -1190,6 +1318,7 @@ HAVING (t1.c = 1) OR (t1.c = 2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1197,6 +1326,7 @@ FROM t1
WHERE (t1.a = 1) AND (t1.a = t1.c) AND
((t1.c = 1) OR (t1.c = 2))
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # OR formula : pushdown through equality, impossible WHERE
@@ -1209,6 +1339,7 @@ HAVING (t1.c = 3) OR (t1.c = 2);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1216,6 +1347,7 @@ FROM t1
WHERE (t1.a = 1) AND (t1.a = t1.c) AND
((t1.c = 3) OR (t1.c = 2))
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula : pushdown through equality, impossible WHERE
@@ -1228,6 +1360,7 @@ HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1235,6 +1368,7 @@ FROM t1
WHERE (t1.a = 1) AND (t1.c = 3) AND
(t1.a > 2) AND (t1.a = t1.c)
GROUP BY t1.a,t1.c;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
let $query=
@@ -1246,6 +1380,7 @@ HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t1.a,MAX(t1.b),t1.c
@@ -1253,6 +1388,7 @@ FROM t1
WHERE (t1.a = 1) AND (t1.c = 3) AND
(t1.a > 2) AND (t1.a = t1.c)
GROUP BY t1.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula with OR subformula : AND condition in WHERE
@@ -1265,6 +1401,7 @@ HAVING (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1));
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t3.a,MAX(t3.c),t3.d
@@ -1272,6 +1409,7 @@ FROM t3
WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2)) AND
(t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1))
GROUP BY t3.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # AND formula with OR subformula : AND condition in WHERE
@@ -1284,6 +1422,7 @@ HAVING (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1));
eval $no_pushdown $query;
eval $query;
eval explain $query;
+--source include/explain-no-costs.inc
eval explain format=json $query;
let $query=
SELECT t3.a,t3.b,MAX(t3.c),t3.d
@@ -1291,6 +1430,7 @@ FROM t3
WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2)) AND
(t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1))
GROUP BY t3.a;
+--source include/explain-no-costs.inc
eval $no_pushdown explain format=json $query;
--echo # prepare statement
@@ -1359,6 +1499,7 @@ select d1 from t1
eval $q1;
eval explain extended $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
delete from t1;
@@ -1371,6 +1512,7 @@ select d1 from t1
eval $q2;
eval explain extended $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
drop table t1;
@@ -1420,6 +1562,7 @@ INSERT INTO t2 VALUES (2),(3);
let $q=
SELECT a FROM t1 GROUP BY a HAVING a = 8 OR a = ( SELECT MIN(c) FROM t2 );
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
eval $q;
@@ -1427,6 +1570,7 @@ let $q=
SELECT a FROM t1 GROUP BY a,b
HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and b < 20;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
eval $q;
@@ -1434,12 +1578,14 @@ let $q=
SELECT a FROM t1 GROUP BY a
HAVING ( a = 8 OR a = ( SELECT MIN(c) FROM t2 ) ) and SUM(b) > 20;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
eval $q;
let $q=
SELECT a FROM t1 GROUP BY a HAVING a = ( SELECT MIN(c) FROM t2 );
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $q;
eval $q;
diff --git a/mysql-test/main/ignored_index.result b/mysql-test/main/ignored_index.result
index 03748d48098..176ca61decd 100644
--- a/mysql-test/main/ignored_index.result
+++ b/mysql-test/main/ignored_index.result
@@ -255,17 +255,15 @@ DROP TABLE t1;
# IGNORED fulltext indexes.
#
CREATE TABLE t1 (a VARCHAR(200), b TEXT, FULLTEXT (a,b));
-INSERT INTO t1 VALUES('Some data', 'for full-text search');
-ANALYZE TABLE t1;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze Warning Engine-independent statistics are not collected for column 'b'
-test.t1 analyze status OK
-EXPLAIN SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("collections");
+INSERT INTO t1 VALUES('Some data', 'for full-text search'),("hello","hello world"),("mars","here I come");
+SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("search");
+a b
+Some data for full-text search
+EXPLAIN SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("search");
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 fulltext a a 0 1 Using where
ALTER TABLE t1 ALTER INDEX a IGNORED;
-EXPLAIN SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("collections");
+SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("search");
ERROR HY000: Can't find FULLTEXT index matching the column list
DROP TABLE t1;
#
diff --git a/mysql-test/main/ignored_index.test b/mysql-test/main/ignored_index.test
index a3d46fe6046..b2fadcd5862 100644
--- a/mysql-test/main/ignored_index.test
+++ b/mysql-test/main/ignored_index.test
@@ -222,13 +222,11 @@ DROP TABLE t1;
--echo # IGNORED fulltext indexes.
--echo #
CREATE TABLE t1 (a VARCHAR(200), b TEXT, FULLTEXT (a,b));
-INSERT INTO t1 VALUES('Some data', 'for full-text search');
-ANALYZE TABLE t1;
-
-let $query=
-EXPLAIN SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("collections");
+INSERT INTO t1 VALUES('Some data', 'for full-text search'),("hello","hello world"),("mars","here I come");
+let $query=SELECT * FROM t1 WHERE MATCH(a, b) AGAINST ("search");
--eval $query
+--eval EXPLAIN $query
ALTER TABLE t1 ALTER INDEX a IGNORED;
--error ER_FT_MATCHING_KEY_NOT_FOUND
diff --git a/mysql-test/main/in_subq_cond_pushdown.result b/mysql-test/main/in_subq_cond_pushdown.result
index f114fc6824e..d0431852614 100644
--- a/mysql-test/main/in_subq_cond_pushdown.result
+++ b/mysql-test/main/in_subq_cond_pushdown.result
@@ -51,12 +51,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c < 25 and t1.a is not null and t1.c is not null"
}
@@ -70,12 +73,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` < 25",
"temporary_table": {
"nested_loop": [
@@ -83,7 +89,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -150,12 +158,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c > 55 and t1.b < 4 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -169,12 +180,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` > 55 and t2.f < 4",
"temporary_table": {
"nested_loop": [
@@ -182,7 +196,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -251,12 +267,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.c > 60 or t1.c < 25) and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -270,12 +289,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` > 60 or `MAX(t2.g)` < 25",
"temporary_table": {
"nested_loop": [
@@ -283,7 +305,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -350,12 +374,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.c > 60 or t1.c < 25) and t1.b > 2 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -369,12 +396,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "(`MAX(t2.g)` > 60 or `MAX(t2.g)` < 25) and t2.f > 2",
"temporary_table": {
"nested_loop": [
@@ -382,7 +412,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -451,12 +483,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a < 2 or t1.d > 3) and t1.b > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -470,12 +505,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "t2.f > 1",
"temporary_table": {
"nested_loop": [
@@ -483,7 +521,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -550,12 +590,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c > 20 and t1.a is not null and t1.c is not null"
}
@@ -569,12 +612,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["v1_x", "MAX(v1_y)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(v1_y)` > 20",
"temporary_table": {
"nested_loop": [
@@ -582,7 +628,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.x > 1 and t3.x <= 3"
}
@@ -650,12 +698,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.y > 20 and t3.x <= 3 and t3.y is not null"
}
@@ -665,7 +716,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 8,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -683,12 +736,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t3.y"],
+ "loops": 128,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` > 20",
"temporary_table": {
"nested_loop": [
@@ -696,7 +752,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -765,12 +823,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 2 and t1.a is not null and t1.c is not null"
}
@@ -784,19 +845,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e < 2"
}
@@ -865,12 +931,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 2 and t1.a < 5 and t1.a is not null and t1.c is not null"
}
@@ -884,19 +953,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e > 2 and t2.e < 5"
}
@@ -967,12 +1041,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a < 2 or t1.a >= 4) and t1.a is not null and t1.c is not null"
}
@@ -986,19 +1063,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and (t2.e < 2 or t2.e >= 4)"
}
@@ -1065,12 +1147,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a < 2 or t1.a = 5) and t1.b > 3 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -1084,19 +1169,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and (t2.e < 2 or t2.e = 5) and t2.f > 3"
}
@@ -1163,12 +1253,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a < 2 or t1.a = 5) and t1.b > 3 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -1182,19 +1275,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and (t2.e < 2 or t2.e = 5) and t2.f > 3"
}
@@ -1261,12 +1359,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.b < 3 or t1.d > 2) and t1.a < 2 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -1280,19 +1381,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e < 2"
}
@@ -1359,12 +1465,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a = 1 and t1.d = 1 and t1.c is not null"
}
@@ -1378,18 +1487,23 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["const", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e = 1"
}
@@ -1455,12 +1569,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.d = t1.a and t1.a > 1 and t1.a is not null and t1.c is not null"
}
@@ -1474,19 +1591,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e > 1"
}
@@ -1551,12 +1673,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 3 and t1.a is not null and t1.c is not null"
}
@@ -1570,19 +1695,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["v1_x", "MAX(v1_y)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.x > 1 and t3.x <= 3 and t3.x < 3"
}
@@ -1652,12 +1782,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.x < 2 and t3.y > 30 and t3.x <= 3 and t3.x is not null"
}
@@ -1667,7 +1800,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 8,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -1685,19 +1820,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t3.x", "test.t1.c"],
+ "loops": 128,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e <= 3"
}
@@ -1765,12 +1905,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.b < 3 or t1.b = 4) and t1.a < 3 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -1784,12 +1927,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "t2.f < 3 or t2.f = 4",
"temporary_table": {
"nested_loop": [
@@ -1797,7 +1943,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e < 3"
}
@@ -1864,12 +2012,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a + t1.c > 41 and t1.a is not null and t1.c is not null"
}
@@ -1883,12 +2034,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "t2.e + `MAX(t2.g)` > 41",
"temporary_table": {
"nested_loop": [
@@ -1896,7 +2050,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -1965,12 +2121,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c - t1.a < 35 and t1.a is not null and t1.c is not null"
}
@@ -1984,12 +2143,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` - t2.e < 35",
"temporary_table": {
"nested_loop": [
@@ -1997,7 +2159,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -2064,12 +2228,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c * t1.a > 100 and t1.a is not null and t1.c is not null"
}
@@ -2083,12 +2250,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` * t2.e > 100",
"temporary_table": {
"nested_loop": [
@@ -2096,7 +2266,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -2167,12 +2339,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c / t1.a > 30 and t1.a is not null and t1.c is not null"
}
@@ -2186,12 +2361,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` / t2.e > 30",
"temporary_table": {
"nested_loop": [
@@ -2199,7 +2377,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -2266,12 +2446,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c between 50 and 100 and t1.a is not null and t1.c is not null"
}
@@ -2285,12 +2468,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "`MAX(t2.g)` between 50 and 100",
"temporary_table": {
"nested_loop": [
@@ -2298,7 +2484,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -2365,12 +2553,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a + t1.b > 5 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -2384,19 +2575,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e + t2.f > 5"
}
@@ -2463,12 +2659,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a - t1.b > 0 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -2482,19 +2681,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e - t2.f > 0"
}
@@ -2561,12 +2765,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a * t1.b > 6 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -2580,19 +2787,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e * t2.f > 6"
}
@@ -2661,12 +2873,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b / t1.a > 2 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -2680,19 +2895,24 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.f / t2.e > 2"
}
@@ -2765,12 +2985,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a between 1 and 3 and t1.a is not null and t1.c is not null"
}
@@ -2784,19 +3007,24 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["e", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e between 1 and 3"
}
@@ -2867,12 +3095,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c > 3 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -2886,12 +3117,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "MAX(v2.f)", "max_g"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "v2.max_g > 3",
"temporary_table": {
"nested_loop": [
@@ -2899,12 +3133,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.e < 5",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_g > 25",
"filesort": {
"sort_key": "t2.e",
@@ -2914,7 +3151,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5"
}
@@ -2991,12 +3230,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3010,24 +3252,30 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "MAX(v2.f)", "max_g"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.e < 5 and v2.e > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_g > 25",
"filesort": {
"sort_key": "t2.e",
@@ -3037,7 +3285,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e > 1"
}
@@ -3115,12 +3365,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.c < 100 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3134,12 +3387,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "MAX(v2.f)", "max_g"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "v2.max_g < 100",
"temporary_table": {
"nested_loop": [
@@ -3147,12 +3403,15 @@ EXPLAIN
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "v2.e < 5 and v2.e > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_g > 25",
"filesort": {
"sort_key": "t2.e",
@@ -3162,7 +3421,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.e > 1"
}
@@ -3263,12 +3524,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3282,24 +3546,30 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "MAX(d_tab.f)", "max_g"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.e < 5 and d_tab.e > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1",
"filesort": {
"sort_key": "t2.f",
@@ -3309,7 +3579,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3428,12 +3700,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3447,24 +3722,30 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "MAX(d_tab.f)", "max_g"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.e < 5 and d_tab.e > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1",
"filesort": {
"sort_key": "t2.f",
@@ -3474,7 +3755,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3594,12 +3877,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3613,24 +3899,30 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "MAX(d_tab.f)", "max_g"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "<derived3>",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.e < 5 and d_tab.e > 1",
"materialized": {
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1",
"filesort": {
"sort_key": "t2.f",
@@ -3640,7 +3932,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3718,7 +4012,7 @@ GROUP BY t1.a
WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t3.x 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.x 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 16 Using where; Using temporary; Using filesort
2 DERIVED <subquery3> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
3 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
@@ -3742,12 +4036,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.x < 5 and t3.x is not null"
}
@@ -3761,12 +4058,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.x"],
- "rows": 2,
+ "loops": 8,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.max_c < 70",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 70",
"filesort": {
"sort_key": "t1.a",
@@ -3776,7 +4076,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a < 5 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3790,12 +4092,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "t2.f < 5",
"temporary_table": {
"nested_loop": [
@@ -3803,7 +4108,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e > 1 and t2.e < 5"
}
@@ -3903,7 +4210,7 @@ GROUP BY t1.a
WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t3.x 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.x 1 Using where
2 DERIVED t1 ALL NULL NULL NULL NULL 16 Using where; Using temporary; Using filesort
2 DERIVED <subquery3> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
3 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
@@ -3927,12 +4234,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t3.x < 5 and t3.x is not null"
}
@@ -3946,12 +4256,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.x"],
- "rows": 2,
+ "loops": 8,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "d_tab.max_c < 70",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"having_condition": "max_c < 70",
"filesort": {
"sort_key": "t1.a",
@@ -3961,7 +4274,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a > 1 and t1.a < 5 and t1.a is not null and t1.b is not null and t1.c is not null"
}
@@ -3975,12 +4290,15 @@ EXPLAIN
"key_length": "12",
"used_key_parts": ["e", "f", "MAX(t2.g)"],
"ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"having_condition": "t2.f < 5",
"temporary_table": {
"nested_loop": [
@@ -3988,7 +4306,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e > 1 and t2.e < 5"
}
@@ -4065,12 +4385,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 1 and t1.b is not null and t1.c is not null"
}
@@ -4084,12 +4407,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["f", "MAX(t2.g) OVER (PARTITION BY t2.f)"],
"ref": ["test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -4104,7 +4430,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.f > 1"
}
@@ -4169,12 +4497,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 16,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b > 1 and t1.b is not null and t1.c is not null"
}
@@ -4191,13 +4522,16 @@ EXPLAIN
"CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)"
],
"ref": ["test.t1.b", "test.t1.c"],
+ "loops": 16,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.c = `<subquery2>`.`CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)`",
"materialized": {
"unique": 1,
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -4212,7 +4546,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.e < 5 and t2.f > 1"
}
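
The result hunks above show the two members that EXPLAIN FORMAT=JSON gains with this commit's cost model: every query block and table node now carries a "cost" value, and table nodes also report "loops" (apparently the number of times that access is expected to run — 16 for the materialized subquery probed once per row of the 16-row t1, 128 when it sits behind an 8x16-row join prefix). The raw cost numbers depend on machine and engine, which is why the recorded results mask them. A minimal sketch of inspecting the new members by hand, assuming a table shaped like the test's t1:

EXPLAIN FORMAT=JSON
SELECT t1.* FROM t1 WHERE t1.c < 25;
-- each table node in the JSON is then expected to contain roughly
--   "loops": 1, "rows": 16, "cost": 0.0123
-- where 0.0123 is only an illustrative number; real values vary, hence "COST_REPLACED" above.
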
diff --git a/mysql-test/main/in_subq_cond_pushdown.test b/mysql-test/main/in_subq_cond_pushdown.test
index 7763201cda1..8a12a2c5977 100644
--- a/mysql-test/main/in_subq_cond_pushdown.test
+++ b/mysql-test/main/in_subq_cond_pushdown.test
@@ -42,6 +42,7 @@ WHERE t1.c<25 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted AND formula : pushing into HAVING
@@ -60,6 +61,7 @@ WHERE t1.c>55 AND t1.b<4 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted OR formula : pushing into HAVING
@@ -78,6 +80,7 @@ WHERE (t1.c>60 OR t1.c<25) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted AND-OR formula : pushing into HAVING
@@ -96,6 +99,7 @@ WHERE ((t1.c>60 OR t1.c<25) AND t1.b>2) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into HAVING
@@ -114,6 +118,7 @@ WHERE ((t1.a<2 OR t1.d>3) AND t1.b>1) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # using view IN subquery defINition : pushing into HAVING
@@ -132,6 +137,7 @@ WHERE t1.c>20 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # using equality : pushing into WHERE
@@ -150,6 +156,7 @@ WHERE t1.c>20 AND t1.c=v1_y AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -168,6 +175,7 @@ WHERE t1.a<2 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted AND formula : pushing into WHERE
@@ -186,6 +194,7 @@ WHERE t1.a>2 AND t1.a<5 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted OR formula : pushing into WHERE
@@ -204,6 +213,7 @@ WHERE (t1.a<2 OR t1.a>=4) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted AND-OR formula : pushing into WHERE
@@ -222,6 +232,7 @@ WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # extracted AND-OR formula : pushing into WHERE
@@ -240,6 +251,7 @@ WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -258,6 +270,7 @@ WHERE ((t1.b<3 OR t1.d>2) AND t1.a<2) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # using equalities : pushing into WHERE
@@ -276,6 +289,7 @@ WHERE t1.d=1 AND t1.a=t1.d AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # using equality : pushing into WHERE
@@ -294,6 +308,7 @@ WHERE t1.d>1 AND t1.a=t1.d AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # using view IN subquery definition : pushing into WHERE
@@ -312,6 +327,7 @@ WHERE t1.a<3 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # using equality : pushing into WHERE
@@ -330,6 +346,7 @@ WHERE t1.a=v1_x AND v1_x<2 AND v1_y>30 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -349,6 +366,7 @@ WHERE ((t1.b<3 OR t1.b=4) AND t1.a<3) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using addition : pushing into HAVING
@@ -367,6 +385,7 @@ WHERE (t1.a+t1.c>41) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using substitution : pushing into HAVING
@@ -385,6 +404,7 @@ WHERE (t1.c-t1.a<35) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using multiplication : pushing into HAVING
@@ -403,6 +423,7 @@ WHERE (t1.c*t1.a>100) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using division : pushing into HAVING
@@ -421,6 +442,7 @@ WHERE (t1.c/t1.a>30) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using BETWEEN : pushing into HAVING
@@ -439,6 +461,7 @@ WHERE (t1.c BETWEEN 50 AND 100) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using addition : pushing into WHERE
@@ -457,6 +480,7 @@ WHERE (t1.a+t1.b > 5) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using substitution : pushing into WHERE
@@ -475,6 +499,7 @@ WHERE (t1.a-t1.b > 0) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using multiplication : pushing into WHERE
@@ -493,6 +518,7 @@ WHERE (t1.a*t1.b > 6) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using division : pushing into WHERE
@@ -511,6 +537,7 @@ WHERE (t1.b/t1.a > 2) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula using BETWEEN : pushing into WHERE
@@ -529,6 +556,7 @@ WHERE (t1.a BETWEEN 1 AND 3) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into HAVING of the IN subquery
@@ -548,6 +576,7 @@ WHERE t1.c>3 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE of the IN subquery
@@ -568,6 +597,7 @@ WHERE t1.a>1 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE and HAVING
@@ -589,6 +619,7 @@ WHERE t1.a>1 AND t1.c<100 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE of the IN subquery
@@ -615,6 +646,7 @@ WHERE t1.a>1 AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into HAVING of the derived table
@@ -640,6 +672,7 @@ WHERE d_tab.a=t3.x AND d_tab.b>2;
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE of the derived table
@@ -665,6 +698,7 @@ WHERE d_tab.a=t3.x AND d_tab.a<5;
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE and HAVING
@@ -692,6 +726,7 @@ WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE of the derived table
@@ -717,6 +752,7 @@ WHERE d_tab.a=t3.x AND d_tab.a<5;
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -735,6 +771,7 @@ WHERE (t1.b>1) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
--echo # conjunctive subformula : pushing into WHERE
@@ -753,6 +790,7 @@ WHERE (t1.b>1) AND
EVAL $no_pushdown $query;
EVAL $query;
EVAL EXPLAIN $query;
+--source include/explain-no-costs.inc
EVAL EXPLAIN FORMAT=JSON $query;
DROP TABLE t1,t2,t3;
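
Each recorded EXPLAIN FORMAT=JSON in this test is now preceded by --source include/explain-no-costs.inc so that the "cost" members added above do not make the expected output machine-dependent. The include file itself is not shown in this diff; a hedged sketch of the kind of one-statement masking it is assumed to perform in mysqltest:

# replace_regex only affects the output of the next statement, which is why the
# include has to be sourced immediately before every recorded EXPLAIN FORMAT=JSON.
--replace_regex /"cost": [0-9.e+-]+/"cost": "COST_REPLACED"/
EVAL EXPLAIN FORMAT=JSON $query;
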
diff --git a/mysql-test/main/index_intersect.result b/mysql-test/main/index_intersect.result
index 3ec98216479..c6332591ff5 100644
--- a/mysql-test/main/index_intersect.result
+++ b/mysql-test/main/index_intersect.result
@@ -80,7 +80,7 @@ EXPLAIN
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
+1 SIMPLE City range Population,Name Population 4 NULL # Using index condition; Using where
SELECT * FROM City USE INDEX ()
WHERE Name LIKE 'C%' AND Population > 1000000;
ID Name Country Population
@@ -335,8 +335,8 @@ ID Name Country Population
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
ID Name Country Population
-1024 Mumbai (Bombay) IND 10500000
3580 Moscow RUS 8389200
+1024 Mumbai (Bombay) IND 10500000
SELECT COUNT(*) FROM City WHERE Name BETWEEN 'M' AND 'N';
COUNT(*)
301
@@ -355,9 +355,6 @@ COUNT(*)
SELECT COUNT(*) FROM City WHERE Country LIKE 'C%';
COUNT(*)
551
-SELECT COUNT(*) FROM City WHERE Country LIKE 'B%';
-COUNT(*)
-339
SELECT COUNT(*) FROM City WHERE Country LIKE 'J%';
COUNT(*)
256
@@ -370,12 +367,12 @@ EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 1000000 AND Country LIKE 'J%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Population,Country,Name 4,3,35 NULL # Using sort_intersect(Population,Country,Name); Using where
+1 SIMPLE City index_merge Population,Country,Name Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 500000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name,Country Name,Country,Population # NULL # Using sort_intersect(Name,Country,Population); Using where
+1 SIMPLE City index_merge Population,Name,Country Name,Country # NULL # Using sort_intersect(Name,Country); Using where
SELECT * FROM City USE INDEX ()
WHERE Name BETWEEN 'M' AND 'N' AND Population > 1000000 AND Country LIKE 'C%';
ID Name Country Population
@@ -466,17 +463,17 @@ EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY,Country 4,4,3 NULL # Using sort_intersect(Population,PRIMARY,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY 4,4 NULL # Using sort_intersect(Population,PRIMARY); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY,Country 4,4,3 NULL # Using sort_intersect(Population,PRIMARY,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY 4,4 NULL # Using sort_intersect(Population,PRIMARY); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Country,PRIMARY 3,4 NULL # Using sort_intersect(Country,PRIMARY); Using where
+1 SIMPLE City range PRIMARY,Population,Country Country 3 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3701 AND 4000 AND Population > 1000000
@@ -488,7 +485,7 @@ SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
AND Country BETWEEN 'S' AND 'Z' ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range PRIMARY,Population,Country Population 4 NULL # Using index condition; Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
SELECT * FROM City USE INDEX ()
WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
@@ -720,13 +717,13 @@ EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range PRIMARY,Population,Country Population 4 NULL # Using index condition; Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,PRIMARY 4,4 NULL # Using sort_intersect(Population,PRIMARY); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
AND Country BETWEEN 'S' AND 'Z';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range PRIMARY,Population,Country Population 4 NULL # Using index condition; Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
SELECT * FROM City WHERE
Name LIKE 'C%' AND Population > 1000000;
ID Name Country Population
@@ -966,7 +963,7 @@ EXPLAIN
SELECT * FROM t1
WHERE (f1 < 535 OR f1 > 985) AND ( f4='r' OR f4 LIKE 'a%' ) ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,f4 f4 35 NULL # Using index condition; Using where
+1 SIMPLE t1 index_merge PRIMARY,f4 f4,PRIMARY 35,4 NULL # Using sort_intersect(f4,PRIMARY); Using where
SELECT * FROM t1
WHERE (f1 < 535 OR f1 > 985) AND ( f4='r' OR f4 LIKE 'a%' ) ;
f1 f4 f5
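
In the index_intersect results just above, plans flip in both directions: some queries that used a sort_intersect index_merge now take a single range scan, others move the opposite way, and the list of intersected indexes sometimes shrinks, since the cost model now weighs per index whether adding it to the intersection pays off. A hedged way to compare the two plan shapes by hand on the test's City table, using only standard index hints (illustrative, not taken from the test file):

-- Let the optimizer choose freely between range and sort_intersect:
EXPLAIN SELECT * FROM City WHERE Name LIKE 'M%' AND Population > 7000000;
-- Hide the index the chosen plan relied on to see the shape of the alternative plan:
EXPLAIN SELECT * FROM City IGNORE INDEX (Population)
WHERE Name LIKE 'M%' AND Population > 7000000;
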
diff --git a/mysql-test/main/index_intersect.test b/mysql-test/main/index_intersect.test
index 26937fd5eef..d6208f67a92 100644
--- a/mysql-test/main/index_intersect.test
+++ b/mysql-test/main/index_intersect.test
@@ -120,7 +120,6 @@ SELECT COUNT(*) FROM City WHERE Name BETWEEN 'G' AND 'K';
SELECT COUNT(*) FROM City WHERE Population > 1000000;
SELECT COUNT(*) FROM City WHERE Population > 500000;
SELECT COUNT(*) FROM City WHERE Country LIKE 'C%';
-SELECT COUNT(*) FROM City WHERE Country LIKE 'B%';
SELECT COUNT(*) FROM City WHERE Country LIKE 'J%';
diff --git a/mysql-test/main/index_intersect_innodb.result b/mysql-test/main/index_intersect_innodb.result
index 44407dbcd30..2aba9dc6fcf 100644
--- a/mysql-test/main/index_intersect_innodb.result
+++ b/mysql-test/main/index_intersect_innodb.result
@@ -81,12 +81,12 @@ EXPLAIN
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 300000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Population,Name Name 35 NULL # Using index condition; Using where
+1 SIMPLE City index_merge Population,Name Name,Population 35,4 NULL # Using sort_intersect(Name,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
+1 SIMPLE City range Population,Name Population 4 NULL # Using index condition; Using where
SELECT * FROM City USE INDEX ()
WHERE Name LIKE 'C%' AND Population > 1000000;
ID Name Country Population
@@ -341,8 +341,8 @@ ID Name Country Population
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
ID Name Country Population
-1024 Mumbai (Bombay) IND 10500000
3580 Moscow RUS 8389200
+1024 Mumbai (Bombay) IND 10500000
SELECT COUNT(*) FROM City WHERE Name BETWEEN 'M' AND 'N';
COUNT(*)
301
@@ -361,9 +361,6 @@ COUNT(*)
SELECT COUNT(*) FROM City WHERE Country LIKE 'C%';
COUNT(*)
551
-SELECT COUNT(*) FROM City WHERE Country LIKE 'B%';
-COUNT(*)
-339
SELECT COUNT(*) FROM City WHERE Country LIKE 'J%';
COUNT(*)
256
@@ -371,17 +368,17 @@ EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'M' AND 'N' AND Population > 1000000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Population,Name,Country 4,35,3 NULL # Using sort_intersect(Population,Name,Country); Using where
+1 SIMPLE City index_merge Population,Country,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 1000000 AND Country LIKE 'J%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Population,Country,Name 4,3,35 NULL # Using sort_intersect(Population,Country,Name); Using where
+1 SIMPLE City index_merge Population,Country,Name Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 500000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Population,Name,Country Name # NULL # Using index condition; Using where
+1 SIMPLE City index_merge Population,Name,Country Name,Population # NULL # Using sort_intersect(Name,Population); Using where
SELECT * FROM City USE INDEX ()
WHERE Name BETWEEN 'M' AND 'N' AND Population > 1000000 AND Country LIKE 'C%';
ID Name Country Population
@@ -472,29 +469,29 @@ EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City range PRIMARY,Population,Country PRIMARY 4 NULL # Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City range PRIMARY,Population,Country PRIMARY 4 NULL # Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City range PRIMARY,Population,Country Country 7 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3701 AND 4000 AND Population > 1000000
AND Country BETWEEN 'S' AND 'Z';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population 4,4 NULL # Using sort_intersect(PRIMARY,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
AND Country BETWEEN 'S' AND 'Z' ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population 4,4 NULL # Using sort_intersect(PRIMARY,Population); Using where
SELECT * FROM City USE INDEX ()
WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
@@ -711,7 +708,7 @@ EXPLAIN
SELECT * FROM City WHERE
Name LIKE 'M%' AND Population > 1500000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
+1 SIMPLE City range Population,Name Population 4 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 1000000 AND Country LIKE 'J%';
@@ -721,12 +718,12 @@ EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'J' AND Population > 500000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Population,Country,Name Name 35 NULL # Using index condition; Using where
+1 SIMPLE City index_merge Population,Country,Name Name,Population 35,4 NULL # Using sort_intersect(Name,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City range PRIMARY,Population,Country PRIMARY 4 NULL # Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
diff --git a/mysql-test/main/index_merge_innodb.result b/mysql-test/main/index_merge_innodb.result
index d9be08a0540..6f1daf7552c 100644
--- a/mysql-test/main/index_merge_innodb.result
+++ b/mysql-test/main/index_merge_innodb.result
@@ -582,7 +582,7 @@ set @tmp_index_merge_ror_cpk=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
explain select * from t1 where pk1 < 7500 and key1 = 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,key1 key1,PRIMARY 4,4 NULL ROWS Using intersect(key1,PRIMARY); Using where
+1 SIMPLE t1 ref PRIMARY,key1 key1 4 const ROWS Using index condition
set optimizer_switch=@tmp_index_merge_ror_cpk;
explain select * from t1 where pktail1ok=1 and key1=10;
id select_type table type possible_keys key key_len ref rows Extra
@@ -654,7 +654,7 @@ f1
EXPLAIN SELECT t1.f1 FROM t1
WHERE (SELECT COUNT(*) FROM t2 WHERE t2.f3 = 'h' AND t2.f2 = t1.f1) = 0 AND t1.f1 = 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index
+1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1
2 SUBQUERY t2 index_merge f2,f3 f3,f2 2,5 NULL 1 Using intersect(f3,f2); Using where; Using index
DROP TABLE t1,t2;
#
@@ -827,13 +827,13 @@ INDEX (b)
INSERT INTO t1 SELECT seq, seq, seq from seq_1_to_100;
EXPLAIN SELECT * FROM t1 WHERE a='1' OR b < 5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,b b,PRIMARY 5,3074 NULL 5 Using sort_union(b,PRIMARY); Using where
+1 SIMPLE t1 ALL PRIMARY,b NULL NULL NULL 100 Using where
SELECT * FROM t1 WHERE a='1' OR b < 5;
a b c
+1 1 1
2 2 2
3 3 3
4 4 4
-1 1 1
DROP TABLE t1;
SET sort_buffer_size= @save_sort_buffer_size;
disconnect disable_purge;
diff --git a/mysql-test/main/index_merge_myisam.result b/mysql-test/main/index_merge_myisam.result
index 1ecc1ded83a..5866612c186 100644
--- a/mysql-test/main/index_merge_myisam.result
+++ b/mysql-test/main/index_merge_myisam.result
@@ -232,7 +232,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index i1_3,i2_3 i321 12 NULL 1024 Using where; Using index
explain select key7 from t2 where key1 <100 or key2 < 100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL 186 Using sort_union(i1_3,i2_3); Using where
+1 SIMPLE t2 ALL i1_3,i2_3 NULL NULL NULL 1024 Using where
create table t4 (
key1a int not null,
key1b int not null,
@@ -405,8 +405,8 @@ from t0 as A straight_join t0 as B
where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1)
and (B.key1 = 1 and B.key2 = 1 and B.key3 = 1 and B.key4=1 and B.key5=1 and B.key6=1 and B.key7 = 1 or B.key8=1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE A index_merge i1,i2,i3,i4,i5,i6,i7?,i8 i2,i3,i4,i5,i6,i7?,i8 X NULL # Using union(intersect(i2,i3,i4,i5,i6,i7?),i8); Using where
-1 SIMPLE B index_merge i1,i2,i3,i4,i5,i6,i7?,i8 i2,i3,i4,i5,i6,i7?,i8 X NULL # Using union(intersect(i2,i3,i4,i5,i6,i7?),i8); Using where; Using join buffer (flat, BNL join)
+1 SIMPLE A ALL i1,i2,i3,i4,i5,i6,i7?,i8 NULL NULL NULL # Using where
+1 SIMPLE B ALL i1,i2,i3,i4,i5,i6,i7?,i8 NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5)
from t0 as A straight_join t0 as B
where (A.key1 = 1 and A.key2 = 1 and A.key3 = 1 and A.key4=1 and A.key5=1 and A.key6=1 and A.key7 = 1 or A.key8=1)
@@ -554,7 +554,7 @@ DROP TABLE t1;
create table t1 (a int);
insert into t1 values (1),(2);
create table t2(a int, b int);
-insert into t2 values (1,1), (2, 1000);
+insert into t2 values (1,1), (2, 1000),(5000,5000);
create table t3 (a int, b int, filler char(100), key(a), key(b));
insert into t3 select 1000, 1000,'filler' from seq_1_to_1000;
insert into t3 values (1,1,'data');
@@ -566,7 +566,7 @@ where t2.a=t1.a and (t3.a=t2.b or t3.b=t2.b or t3.b=t2.b+1));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
2 MATERIALIZED t3 ALL a,b NULL NULL NULL 1002 Range checked for each record (index map: 0x3)
select * from t1
where exists (select 1 from t2, t3
@@ -683,7 +683,7 @@ key1 key2 key3 key4 filler1
-1 -1 100 100 key4-key3
explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index
+1 SIMPLE t1 index_merge key1,key2,key3 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where
select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
key1 key2 key3
100 100 100
@@ -762,7 +762,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b st_a,st_b 4,4 NULL 3515 Using intersect(st_a,st_b); Using where; Using index
explain select st_a from t1 ignore index (st_a) where st_a=1 and st_b=1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,stb_swt1a_2b,stb_swt1b,st_b st_b 4 const 15094 Using where
+1 SIMPLE t1 ALL sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,stb_swt1a_2b,stb_swt1b,st_b NULL NULL NULL 64806 Using where
explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a sta_swt21a 12 const,const,const 971
@@ -783,7 +783,7 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select * from t1 ignore index (sta_swt21a, sta_swt12a, stb_swt1a_2b, stb_swt1b)
where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1 and swt2b=1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge sta_swt1a,sta_swt2a,st_a,st_b sta_swt1a,sta_swt2a,st_b 8,8,4 NULL 223 Using intersect(sta_swt1a,sta_swt2a,st_b); Using where
+1 SIMPLE t1 index_merge sta_swt1a,sta_swt2a,st_a,st_b sta_swt1a,sta_swt2a 8,8 NULL 960 Using intersect(sta_swt1a,sta_swt2a); Using where
explain select * from t1
where st_a=1 and swt1a=1 and swt2a=1 and st_b=1 and swt1b=1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -910,7 +910,7 @@ INSERT INTO t1 (key1, key2, filler)
SELECT seq/4, seq/8, 'filler-data' FROM seq_30_to_0;
explain select pk from t1 where key1 = 1 and key2 = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref key1,key2 key1 5 const 4 Using where
+1 SIMPLE t1 index_merge key1,key2 key1,key2 5,4 NULL 1 Using intersect(key1,key2); Using where
select pk from t1 where key2 = 1 and key1 = 1;
pk
26
@@ -1487,7 +1487,7 @@ EXPLAIN SELECT t1.f1 FROM t1
WHERE (SELECT COUNT(*) FROM t2 WHERE t2.f3 = 'h' AND t2.f2 = t1.f1) = 0 AND t1.f1 = 2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
-2 SUBQUERY t2 ref f2,f3 f3 2 const 2 Using index condition; Using where
+2 SUBQUERY t2 index_merge f2,f3 f3,f2 2,5 NULL 1 Using intersect(f3,f2); Using where; Using index
DROP TABLE t1,t2;
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
diff --git a/mysql-test/main/information_schema-big.result b/mysql-test/main/information_schema-big.result
index 5c519014800..94c8d274fad 100644
--- a/mysql-test/main/information_schema-big.result
+++ b/mysql-test/main/information_schema-big.result
@@ -37,6 +37,7 @@ INDEX_STATISTICS TABLE_SCHEMA
KEYWORDS WORD
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_COSTS ENGINE
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
@@ -97,6 +98,7 @@ INDEX_STATISTICS TABLE_SCHEMA
KEYWORDS WORD
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_COSTS ENGINE
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
diff --git a/mysql-test/main/information_schema.result b/mysql-test/main/information_schema.result
index e46014e44b9..0c952886d63 100644
--- a/mysql-test/main/information_schema.result
+++ b/mysql-test/main/information_schema.result
@@ -71,6 +71,7 @@ INDEX_STATISTICS
KEYWORDS
KEY_CACHES
KEY_COLUMN_USAGE
+OPTIMIZER_COSTS
OPTIMIZER_TRACE
PARAMETERS
PARTITIONS
@@ -1434,7 +1435,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tables ALL NULL NULL NULL NULL NULL Open_frm_only; Scanned all databases; Using filesort
explain select * from (select table_name from information_schema.tables) as a;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 100
2 DERIVED tables ALL NULL NULL NULL NULL NULL Skip_open_table; Scanned all databases
set optimizer_switch=@tmp_optimizer_switch;
drop view v1;
diff --git a/mysql-test/main/information_schema_all_engines.result b/mysql-test/main/information_schema_all_engines.result
index 23a853e363c..db9bf156b8e 100644
--- a/mysql-test/main/information_schema_all_engines.result
+++ b/mysql-test/main/information_schema_all_engines.result
@@ -42,6 +42,7 @@ INNODB_TRX
KEYWORDS
KEY_CACHES
KEY_COLUMN_USAGE
+OPTIMIZER_COSTS
OPTIMIZER_TRACE
PARAMETERS
PARTITIONS
@@ -123,6 +124,7 @@ INNODB_TRX trx_id
KEYWORDS WORD
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_COSTS ENGINE
OPTIMIZER_TRACE QUERY
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
@@ -204,6 +206,7 @@ INNODB_TRX trx_id
KEYWORDS WORD
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_COSTS ENGINE
OPTIMIZER_TRACE QUERY
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
@@ -289,6 +292,7 @@ INNODB_TABLESPACES_ENCRYPTION information_schema.INNODB_TABLESPACES_ENCRYPTION 1
INNODB_TRX information_schema.INNODB_TRX 1
KEY_CACHES information_schema.KEY_CACHES 1
KEY_COLUMN_USAGE information_schema.KEY_COLUMN_USAGE 1
+OPTIMIZER_COSTS information_schema.OPTIMIZER_COSTS 1
OPTIMIZER_TRACE information_schema.OPTIMIZER_TRACE 1
PARAMETERS information_schema.PARAMETERS 1
PARTITIONS information_schema.PARTITIONS 1
@@ -359,6 +363,7 @@ Database: information_schema
| KEYWORDS |
| KEY_CACHES |
| KEY_COLUMN_USAGE |
+| OPTIMIZER_COSTS |
| OPTIMIZER_TRACE |
| PARAMETERS |
| PARTITIONS |
@@ -430,6 +435,7 @@ Database: INFORMATION_SCHEMA
| KEYWORDS |
| KEY_CACHES |
| KEY_COLUMN_USAGE |
+| OPTIMIZER_COSTS |
| OPTIMIZER_TRACE |
| PARAMETERS |
| PARTITIONS |
@@ -463,5 +469,5 @@ Wildcard: inf_rmation_schema
| information_schema |
SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') GROUP BY TABLE_SCHEMA;
table_schema count(*)
-information_schema 66
+information_schema 67
mysql 31
diff --git a/mysql-test/main/information_schema_db.result b/mysql-test/main/information_schema_db.result
index 725b1a125ad..b5687956414 100644
--- a/mysql-test/main/information_schema_db.result
+++ b/mysql-test/main/information_schema_db.result
@@ -26,8 +26,6 @@ declare ret_val int;
select max(f1) from t1 into ret_val;
return ret_val;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create view v1 as select f1 from t1 where f1 = func1(f1);
create function func2() returns int return 1;
use mbase;
@@ -100,7 +98,7 @@ grant insert on v1 to testdb_2@localhost;
create view v5 as select f1 from t1;
grant select, show view on v5 to testdb_2@localhost;
create definer=`no_such_user`@`no_such_host` view v6 as select f1 from t1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection default;
use testdb_1;
create view v6 as select f1 from t1;
diff --git a/mysql-test/main/innodb_ext_key,off.rdiff b/mysql-test/main/innodb_ext_key,off.rdiff
index b334d006737..ef11f9c05bc 100644
--- a/mysql-test/main/innodb_ext_key,off.rdiff
+++ b/mysql-test/main/innodb_ext_key,off.rdiff
@@ -1,5 +1,5 @@
---- innodb_ext_key.result
-+++ innodb_ext_key,off.result
+--- main/innodb_ext_key.result
++++ main/innodb_ext_key,off.reject
@@ -9,7 +9,7 @@
explain
select count(*) from lineitem where l_orderkey=130 and l_shipdate='1992-07-01';
@@ -172,7 +172,7 @@
where l_partkey between 1 and 10 group by l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range i_l_suppkey_partkey,i_l_partkey i_l_partkey 5 NULL # Using where; Using index for group-by
-+1 SIMPLE lineitem range i_l_suppkey_partkey,i_l_partkey i_l_partkey 5 NULL # Using where; Using index
++1 SIMPLE lineitem range i_l_suppkey_partkey,i_l_partkey i_l_suppkey_partkey 5 NULL # Using where; Using index
flush status;
select max(l_orderkey) from lineitem
where l_partkey between 1 and 10 group by l_partkey;
@@ -230,17 +230,17 @@
Handler_read_retry 0
Handler_read_rnd 0
@@ -314,8 +314,8 @@
- select * from t0, part ignore index (primary)
+ select straight_join * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 ALL NULL NULL NULL NULL 5 Using where
-1 SIMPLE part eq_ref i_p_size i_p_size 9 const,dbt3_s001.t0.a 1
+1 SIMPLE t0 ALL NULL NULL NULL NULL 5
+1 SIMPLE part ref i_p_size i_p_size 5 const 5 Using index condition
- select * from t0, part ignore index (primary)
+ select straight_join * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
a p_partkey p_name p_mfgr p_brand p_type p_size p_container p_retailprice p_comment
-@@ -494,7 +494,7 @@
+@@ -495,7 +495,7 @@
select * from t1, t3 where t3.col1=t1.a and t3.col2=t1.a and t3.pk1=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL # Using where
@@ -249,7 +249,7 @@
drop table t1,t2,t3;
#
# Bug mdev-4340: performance regression with extended_keys=on
-@@ -714,13 +714,13 @@
+@@ -726,13 +726,13 @@
select * from t1 force index(index_date_updated)
where index_date_updated= 10 and index_id < 800;
id select_type table type possible_keys key key_len ref rows Extra
@@ -265,7 +265,7 @@
drop table t0,t1,t2;
#
# MDEV-11196: Error:Run-Time Check Failure #2 - Stack around the variable 'key_buff'
-@@ -768,11 +768,12 @@
+@@ -770,11 +770,12 @@
{
"table": {
"table_name": "t1",
@@ -278,10 +278,10 @@
+ "key_length": "3066",
+ "used_key_parts": ["f2"],
+ "ref": ["const"],
+ "loops": 1,
"rows": 1,
- "filtered": 100,
- "index_condition": "t1.pk1 <= 5 and t1.pk2 <= 5 and t1.f2 = 'abc'",
-@@ -805,8 +806,8 @@
+ "cost": "COST_REPLACED",
+@@ -810,8 +811,8 @@
"access_type": "range",
"possible_keys": ["k1"],
"key": "k1",
@@ -289,6 +289,6 @@
- "used_key_parts": ["pk1", "f2", "pk2"],
+ "key_length": "3007",
+ "used_key_parts": ["pk1", "f2"],
+ "loops": 1,
"rows": 1,
- "filtered": 100,
- "index_condition": "t1.f2 <= 5 and t1.pk2 <= 5 and t1.pk1 = 'abc'",
+ "cost": "COST_REPLACED",
diff --git a/mysql-test/main/innodb_ext_key.result b/mysql-test/main/innodb_ext_key.result
index 02e199bc58a..d1f4c60f5b2 100644
--- a/mysql-test/main/innodb_ext_key.result
+++ b/mysql-test/main/innodb_ext_key.result
@@ -308,15 +308,15 @@ Handler_read_rnd_next 0
# when extended_keys=on
#
create table t0 (a int);
-insert into t0 values (1), (2), (3), (4), (5);
+insert into t0 select seq from seq_1_to_5;
create index i_p_size on part(p_size);
explain
-select * from t0, part ignore index (primary)
+select straight_join * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL NULL NULL NULL NULL 5 Using where
1 SIMPLE part eq_ref i_p_size i_p_size 9 const,dbt3_s001.t0.a 1
-select * from t0, part ignore index (primary)
+select straight_join * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
a p_partkey p_name p_mfgr p_brand p_type p_size p_container p_retailprice p_comment
2 2 blush rosy metallic lemon navajo Manufacturer#1 Brand#13 LARGE BRUSHED BRASS 1 LG CASE 902 final platelets hang f
@@ -339,8 +339,8 @@ a b
EXPLAIN
SELECT * FROM t1 WHERE 2 IN (SELECT MAX(s1.a) FROM t1 AS s1, t1 AS s2);
id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY <subquery2> const distinct_key distinct_key 4 const 1
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
2 MATERIALIZED s1 ALL NULL NULL NULL NULL 2
2 MATERIALIZED s2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
DROP TABLE t1;
@@ -381,17 +381,18 @@ INSERT INTO t2 VALUES
(10), (11), (12), (13), (14),
(15), (16), (17), (18), (19), (24);
EXPLAIN
-SELECT a FROM t1 AS t, t2
-WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
+SELECT a FROM t1 AS t, t2 as t2_out
+WHERE t2_out.c = t.a AND t.b IN (SELECT b FROM t1, t2 WHERE b = t.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t index a,b b 7 NULL 10 Using index
-1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t.a 1 Using index
-1 PRIMARY t1 ref b b 3 test.t.b 2 Using index; Start temporary
+1 PRIMARY t1 index b b 7 NULL 10 Using index; Start temporary
+1 PRIMARY t ref a,b b 3 test.t1.b 2 Using index
1 PRIMARY t2 index NULL PRIMARY 4 NULL 11 Using index; End temporary; Using join buffer (flat, BNL join)
-SELECT a FROM t1 AS t, t2
-WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
+1 PRIMARY t2_out eq_ref PRIMARY PRIMARY 4 test.t.a 1 Using index
+SELECT a FROM t1 AS t, t2 as t2_out
+WHERE t2_out.c = t.a AND t.b IN (SELECT b FROM t1, t2 WHERE b = t.b);
a
24
+Last_query_cost 0.120558
DROP TABLE t1,t2;
#
# LP Bug #923236: hash join + extended_keys = on
@@ -645,7 +646,7 @@ a
2
explain select a from t2 where b is null order by a desc limit 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range b b 9 NULL 3 Using where; Using filesort
+1 SIMPLE t2 index b PRIMARY 8 NULL 2 Using where
select a from t2 where b is null order by a desc limit 2;
a
3
@@ -758,12 +759,13 @@ PRIMARY KEY (pk1,pk2),
KEY(f2)
) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
-explain format= json
+explain format=json
select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3';
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -773,7 +775,9 @@ EXPLAIN
"key": "f2",
"key_length": "3070",
"used_key_parts": ["f2", "pk1"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.pk1 <= 5 and t1.pk2 <= 5 and t1.f2 = 'abc'",
"attached_condition": "t1.f1 <= '3'"
@@ -792,12 +796,13 @@ PRIMARY KEY (pk1,pk2),
KEY k1(pk1,f2)
) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
-explain format= json
+explain format=json
select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -807,7 +812,9 @@ EXPLAIN
"key": "k1",
"key_length": "3011",
"used_key_parts": ["pk1", "f2", "pk2"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.f2 <= 5 and t1.pk2 <= 5 and t1.pk1 = 'abc'",
"attached_condition": "t1.f1 <= '3'"
diff --git a/mysql-test/main/innodb_ext_key.test b/mysql-test/main/innodb_ext_key.test
index 41a55232001..b14a5d5cab3 100644
--- a/mysql-test/main/innodb_ext_key.test
+++ b/mysql-test/main/innodb_ext_key.test
@@ -4,6 +4,7 @@
--source include/innodb_prefix_index_cluster_optimization.inc
--source include/no_valgrind_without_big.inc
+--source include/have_sequence.inc
SET SESSION DEFAULT_STORAGE_ENGINE='InnoDB';
@@ -156,14 +157,14 @@ show status like 'handler_read%';
--echo #
create table t0 (a int);
-insert into t0 values (1), (2), (3), (4), (5);
+insert into t0 select seq from seq_1_to_5;
create index i_p_size on part(p_size);
explain
-select * from t0, part ignore index (primary)
+select straight_join * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
-select * from t0, part ignore index (primary)
+select straight_join * from t0, part ignore index (primary)
where p_partkey=t0.a and p_size=1;
drop table t0;
@@ -240,10 +241,11 @@ INSERT INTO t2 VALUES
(15), (16), (17), (18), (19), (24);
EXPLAIN
-SELECT a FROM t1 AS t, t2
- WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
-SELECT a FROM t1 AS t, t2
- WHERE c = a AND b IN (SELECT b FROM t1, t2 WHERE b = t.b);
+SELECT a FROM t1 AS t, t2 as t2_out
+ WHERE t2_out.c = t.a AND t.b IN (SELECT b FROM t1, t2 WHERE b = t.b);
+SELECT a FROM t1 AS t, t2 as t2_out
+ WHERE t2_out.c = t.a AND t.b IN (SELECT b FROM t1, t2 WHERE b = t.b);
+--source include/last_query_cost.inc
DROP TABLE t1,t2;
@@ -612,7 +614,8 @@ CREATE TABLE t1 (
) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
-explain format= json
+--source include/explain-no-costs.inc
+explain format=json
select * from t1 force index(f2) where pk1 <= 5 and pk2 <=5 and f2 = 'abc' and f1 <= '3';
drop table t1;
@@ -625,7 +628,8 @@ PRIMARY KEY (pk1,pk2),
KEY k1(pk1,f2)
) ENGINE=InnoDB CHARSET utf8 ROW_FORMAT= DYNAMIC;
INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
-explain format= json
+--source include/explain-no-costs.inc
+explain format=json
select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
drop table t1;
diff --git a/mysql-test/main/innodb_icp.result b/mysql-test/main/innodb_icp.result
index c89d49cad0c..bdbc2f6ccc1 100644
--- a/mysql-test/main/innodb_icp.result
+++ b/mysql-test/main/innodb_icp.result
@@ -437,7 +437,7 @@ WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON it.i=it.i WHERE it.pk-t1.i<10
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL # Using where
2 DEPENDENT SUBQUERY it eq_ref PRIMARY PRIMARY 4 func # Using where
-2 DEPENDENT SUBQUERY t2 index NULL PRIMARY 4 NULL # Using index; Using join buffer (flat, BNL join)
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
SELECT * FROM t1
WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON it.i=it.i WHERE it.pk-t1.i<10);
pk i
@@ -455,9 +455,10 @@ c1 INT NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
+insert into t1 select seq,seq from seq_100_to_110;
EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 15 Using where
SET SESSION optimizer_switch='index_condition_pushdown=off';
SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
pk c1
@@ -465,6 +466,17 @@ pk c1
2 7
4 3
5 1
+100 100
+101 101
+102 102
+103 103
+104 104
+105 105
+106 106
+107 107
+108 108
+109 109
+110 110
DROP TABLE t1;
set optimizer_switch= @save_optimizer_switch;
#
@@ -682,7 +694,6 @@ DROP TABLE t1;
#
CREATE TABLE t1 (b int NOT NULL, c int, a varchar(1024), PRIMARY KEY (b));
INSERT INTO t1 VALUES (1,4,'Ill');
-insert into t1 select seq+100,5,seq from seq_1_to_100;
CREATE TABLE t2 (a varchar(1024), KEY (a(512)));
INSERT INTO t2 VALUES
('Ill'), ('eckqzsflbzaffti'), ('w'), ('she'), ('gxbwypqtjzwywwer'), ('w');
@@ -816,6 +827,8 @@ test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
+set @save_optimizer_where_cost=@@optimizer_where_cost;
+set @@optimizer_where_cost=1;
EXPLAIN
SELECT COUNT(*) FROM t1 AS t, t2
WHERE c = g
@@ -839,6 +852,7 @@ OR a = 0 AND h < 'z' );
COUNT(*)
1478
SET optimizer_switch=@save_optimizer_switch;
+set @@optimizer_where_cost=@save_optimizer_where_cost;
DROP TABLE t1,t2;
# check "Handler_pushed" status varuiables
CREATE TABLE t1 (
diff --git a/mysql-test/main/innodb_mysql_lock2.result b/mysql-test/main/innodb_mysql_lock2.result
index ffbe3f8a406..9dd5bddc085 100644
--- a/mysql-test/main/innodb_mysql_lock2.result
+++ b/mysql-test/main/innodb_mysql_lock2.result
@@ -57,8 +57,6 @@ declare j int;
select i from t1 where i = 1 into j;
return j;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f2() returns int
begin
declare k int;
@@ -66,8 +64,6 @@ select i from t1 where i = 1 into k;
insert into t2 values (k + 5);
return 0;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f3() returns int
begin
return (select i from t1 where i = 3);
@@ -91,16 +87,12 @@ declare k int;
select i from v1 where i = 1 into k;
return k;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f7() returns int
begin
declare k int;
select j from v2 where j = 1 into k;
return k;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f8() returns int
begin
declare k int;
@@ -108,8 +100,6 @@ select i from v1 where i = 1 into k;
insert into t2 values (k+5);
return k;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f9() returns int
begin
update v2 set j=j+10 where j=1;
@@ -139,8 +129,6 @@ create procedure p2(inout p int)
begin
select i from t1 where i = 1 into p;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f14() returns int
begin
declare k int;
@@ -160,8 +148,6 @@ declare k int;
select i from t1 where i=1 into k;
set new.l= k+1;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create trigger t4_bu before update on t4 for each row
begin
if (select i from t1 where i=1) then
diff --git a/mysql-test/main/innodb_mysql_lock2.test b/mysql-test/main/innodb_mysql_lock2.test
index 09298a900b1..e5bb4bd9565 100644
--- a/mysql-test/main/innodb_mysql_lock2.test
+++ b/mysql-test/main/innodb_mysql_lock2.test
@@ -833,6 +833,7 @@ insert into t1 values (1), (2), (3), (4), (5);
begin;
--echo # Acquire SR metadata lock on t1.
+--sorted_result
select * from t1;
connection con1;
diff --git a/mysql-test/main/intersect.result b/mysql-test/main/intersect.result
index 425f6940a35..83a607f4e45 100644
--- a/mysql-test/main/intersect.result
+++ b/mysql-test/main/intersect.result
@@ -51,12 +51,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -67,12 +70,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -83,12 +89,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -115,6 +124,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -122,9 +132,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -138,6 +150,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -145,9 +158,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -161,6 +176,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -168,9 +184,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -192,6 +210,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -199,9 +218,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -217,6 +238,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -224,9 +246,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -240,6 +264,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -247,9 +272,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -263,6 +290,7 @@ ANALYZE
"query_block": {
"select_id": 4,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -270,9 +298,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -346,12 +376,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -362,12 +395,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -376,7 +412,9 @@ EXPLAIN
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 3,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -407,6 +445,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -414,9 +453,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -430,6 +471,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -437,9 +479,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -451,9 +495,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 3,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -462,7 +508,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "256Kb",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -480,6 +527,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -487,9 +535,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -505,6 +555,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -512,9 +563,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -528,6 +581,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -535,9 +589,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -549,9 +605,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 3,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -560,7 +618,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "256Kb",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
diff --git a/mysql-test/main/intersect.test b/mysql-test/main/intersect.test
index 5e811f1f56d..30f99b20aa2 100644
--- a/mysql-test/main/intersect.test
+++ b/mysql-test/main/intersect.test
@@ -23,6 +23,7 @@ insert into t3 values (1,1),(2,2),(5,5);
EXPLAIN (select a,b from t1) intersect (select c,d from t2) intersect (select e,f from t3);
EXPLAIN extended (select a,b from t1) intersect (select c,d from t2) intersect (select e,f from t3);
EXPLAIN extended select * from ((select a,b from t1) intersect (select c,d from t2) intersect (select e,f from t3)) a;
+--source include/explain-no-costs.inc
EXPLAIN format=json (select a,b from t1) intersect (select c,d from t2) intersect (select e,f from t3);
--source include/analyze-format.inc
ANALYZE format=json (select a,b from t1) intersect (select c,d from t2) intersect (select e,f from t3);
@@ -43,6 +44,7 @@ EXPLAIN (select a,b from t1) intersect (select c,e from t2,t3);
EXPLAIN extended (select a,b from t1) intersect (select c,e from t2,t3);
EXPLAIN extended select * from ((select a,b from t1) intersect (select c,e from t2,t3)) a;
set @@optimizer_switch='optimize_join_buffer_size=off';
+--source include/explain-no-costs.inc
EXPLAIN format=json (select a,b from t1) intersect (select c,e from t2,t3);
--source include/analyze-format.inc
ANALYZE format=json (select a,b from t1) intersect (select c,e from t2,t3);
diff --git a/mysql-test/main/intersect_all.result b/mysql-test/main/intersect_all.result
index aecd5b1ac18..8f826a193aa 100644
--- a/mysql-test/main/intersect_all.result
+++ b/mysql-test/main/intersect_all.result
@@ -63,12 +63,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -79,12 +82,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -95,12 +101,15 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -127,6 +136,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -134,9 +144,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -150,6 +162,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -157,9 +170,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -173,6 +188,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -180,9 +196,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -204,6 +222,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -211,9 +230,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -229,6 +250,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -236,9 +258,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -252,6 +276,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -259,9 +284,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -275,6 +302,7 @@ ANALYZE
"query_block": {
"select_id": 4,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -282,9 +310,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -377,12 +407,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -393,12 +426,15 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -407,7 +443,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 5,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -438,6 +476,7 @@ ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -445,9 +484,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 6,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -461,6 +502,7 @@ ANALYZE
"query_block": {
"select_id": 2,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -468,9 +510,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 5,
"r_rows": 5,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -482,9 +526,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 5,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -493,7 +539,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "65",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -511,6 +558,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -518,9 +566,11 @@ ANALYZE
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 6,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -536,6 +586,7 @@ ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -543,9 +594,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 6,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -559,6 +612,7 @@ ANALYZE
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -566,9 +620,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 5,
"r_rows": 5,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -580,9 +636,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 5,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -591,7 +649,8 @@ ANALYZE
"buffer_type": "flat",
"buffer_size": "65",
"join_type": "BNL",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
diff --git a/mysql-test/main/intersect_all.test b/mysql-test/main/intersect_all.test
index 5d2b038fde9..769b25393dd 100644
--- a/mysql-test/main/intersect_all.test
+++ b/mysql-test/main/intersect_all.test
@@ -22,6 +22,7 @@ insert into t3 values (1,1),(2,2),(5,5),(2,2);
EXPLAIN (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3);
EXPLAIN extended (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3);
EXPLAIN extended select * from ((select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3)) a;
+--source include/explain-no-costs.inc
EXPLAIN format=json (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3);
--source include/analyze-format.inc
ANALYZE format=json (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3);
@@ -50,6 +51,7 @@ insert into t3 values (2,2);
EXPLAIN (select a,b from t1) intersect all (select c,e from t2,t3);
EXPLAIN extended (select a,b from t1) intersect all (select c,e from t2,t3);
EXPLAIN extended select * from ((select a,b from t1) intersect all (select c,e from t2,t3)) a;
+--source include/explain-no-costs.inc
EXPLAIN format=json (select a,b from t1) intersect all (select c,e from t2,t3);
--source include/analyze-format.inc
ANALYZE format=json (select a,b from t1) intersect all (select c,e from t2,t3);
@@ -325,4 +327,4 @@ select * from t2 where a < 5
intersect all
select * from t3 where a < 5;
-drop table t1,t2,t3; \ No newline at end of file
+drop table t1,t2,t3;
diff --git a/mysql-test/main/invisible_field.result b/mysql-test/main/invisible_field.result
index 7aa88e7787e..988f8524345 100644
--- a/mysql-test/main/invisible_field.result
+++ b/mysql-test/main/invisible_field.result
@@ -559,8 +559,6 @@ DROP TABLE t1;
create or replace table t1 (a int, b int invisible);
insert into t1 values (1),(2);
select * from t1 into outfile 'f';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
load data infile 'f' into table t1;
select a,b from t1;
a b
@@ -591,8 +589,6 @@ a b
truncate table t1;
insert into t1(a,b) values (1,1),(2,2);
select a,b from t1 into outfile 'a';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
load data infile 'a' into table t1(a,b);
select a,b from t1;
a b
diff --git a/mysql-test/main/invisible_field_debug.result b/mysql-test/main/invisible_field_debug.result
index 344a0b860fc..8cc1ee4db9c 100644
--- a/mysql-test/main/invisible_field_debug.result
+++ b/mysql-test/main/invisible_field_debug.result
@@ -346,7 +346,7 @@ invisible a b
9 7 7
explain select * from t1 where invisible =9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL invisible NULL NULL NULL 7 Using where
+1 SIMPLE t1 ref invisible invisible 5 const 7
alter table t1 add x int default 3;
select invisible, a ,b from t1;
invisible a b
@@ -368,11 +368,11 @@ drop index invisible on t1;
ERROR 42000: Can't DROP INDEX `invisible`; check that it exists
explain select * from t1 where invisible =9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL invisible NULL NULL NULL 7 Using where
+1 SIMPLE t1 ref invisible invisible 5 const 7
create index invisible on t1(c);
explain select * from t1 where invisible =9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL invisible_2 NULL NULL NULL 7 Using where
+1 SIMPLE t1 ref invisible_2 invisible_2 5 const 7
show indexes in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
t1 1 b 1 b A NULL NULL NULL YES BTREE NO
diff --git a/mysql-test/main/join.result b/mysql-test/main/join.result
index 59d6ae98ff3..a6c2f88b841 100644
--- a/mysql-test/main/join.result
+++ b/mysql-test/main/join.result
@@ -65,7 +65,7 @@ id id
NULL 75
explain select t1.id,t2.id from t2 left join t1 on t1.id>=74 and t1.id<=0 where t2.id=75 and t1.id is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 const PRIMARY NULL NULL NULL 1 Impossible ON condition
+1 SIMPLE t1 const PRIMARY NULL NULL NULL 0 Impossible ON condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using where
explain select t1.id, t2.id from t1, t2 where t2.id = t1.id and t1.id <0 and t1.id > 0;
id select_type table type possible_keys key key_len ref rows Extra
@@ -258,7 +258,7 @@ name varchar(255) default NULL,
PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1,'s1'),(2,'s2'),(3,'s3'),(4,'s4'),(5,'s5');
-select t1.*, t2.* from t1, t2 where t2.id=t1.t2_id limit 2;
+select straight_join t1.*, t2.* from t2, t1 where t2.id=t1.t2_id limit 2;
t1_id t2_id type cost_unit min_value max_value t3_id item_id id name
12 5 Percent Cost -1 0 -1 -1 5 s5
14 4 Percent Cost -1 0 -1 -1 4 s4
@@ -883,7 +883,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL a,b NULL NULL NULL 1000 Using where
1 SIMPLE t3 ref b b 5 test.t2.b 1
drop table t1, t2, t3;
-create table t1 (a int);
+create table t1 (a int) engine=myisam;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, primary key(a));
insert into t2 select @v:=A.a+10*B.a, @v from t1 A, t1 B;
@@ -892,13 +892,17 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 4.016090
+Last_query_cost 0.011600
select 'The cost of accessing t1 (dont care if it changes' '^';
The cost of accessing t1 (dont care if it changes
The cost of accessing t1 (dont care if it changes^
select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z;
Z
vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv
+select @@myisam.optimizer_disk_read_ratio;
+@@myisam.optimizer_disk_read_ratio
+0.020000
+set global myisam.optimizer_disk_read_ratio=0;
explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using where
@@ -906,10 +910,14 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE B eq_ref PRIMARY PRIMARY 4 test.A.b 1
show status like 'Last_query_cost';
Variable_name Value
-Last_query_cost 28.016090
-select '^^: The above should be ~= 20 + cost(select * from t1). Value less than 20 is an error' Z;
+Last_query_cost 0.046590
+select '^^: The above should be ~= 40 + cost(select * from t1). Value less than 40 is an error' Z;
Z
-^^: The above should be ~= 20 + cost(select * from t1). Value less than 20 is an error
+^^: The above should be ~= 40 + cost(select * from t1). Value less than 40 is an error
+set global myisam.optimizer_disk_read_ratio=default;
+select @@myisam.optimizer_disk_read_ratio;
+@@myisam.optimizer_disk_read_ratio
+0.020000
drop table t1, t2;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT);
CREATE TABLE t2 (c INT PRIMARY KEY, d INT);
@@ -1107,7 +1115,7 @@ ON t4.a = t5.a
ON t1.a = t3.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
-1 SIMPLE t3 ref a a 5 test.t1.a 2 Using where; Using index
+1 SIMPLE t3 ref a a 5 test.t1.a 1 Using where; Using index
1 SIMPLE t4 ALL NULL NULL NULL NULL 0 Using where
1 SIMPLE t5 ALL NULL NULL NULL NULL 0 Using where
1 SIMPLE t6 ALL NULL NULL NULL NULL 0 Using where
@@ -1274,13 +1282,17 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+explain SELECT * FROM t1 JOIN t2 ON t1.v = t2.v WHERE t2.v IS NULL ORDER BY 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ref ix2 ix2 5 const 2 Using index condition; Using temporary; Using filesort
+1 SIMPLE t1 ref ix1 ix1 5 test.t2.v 1
FLUSH STATUS;
SELECT * FROM t1 JOIN t2 ON t1.v = t2.v WHERE t2.v IS NULL ORDER BY 1;
pk v pk v
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 14
+Handler_read_key 1
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -1323,9 +1335,9 @@ FROM t4 JOIN
(t1 JOIN t3 ON t3.ref_t1=t1.c1 JOIN t2 ON t2.ref_t1=t1.c1)
ON t4.ref_t1=t1.c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL NULL NULL NULL NULL 4
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t4 ALL NULL NULL NULL NULL 4 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t4.ref_t1 1
+1 SIMPLE t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join)
EXPLAIN
SELECT *
@@ -1334,9 +1346,9 @@ FROM t4 STRAIGHT_JOIN
ON t4.ref_t1=t1.c1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL NULL NULL NULL NULL 4 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t4.ref_t1 1
-1 SIMPLE t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (incremental, BNL join)
drop table t1,t2,t3,t4;
End of 5.2 tests
#
@@ -1474,7 +1486,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE D system PRIMARY NULL NULL NULL 1
1 SIMPLE DSAR system NULL NULL NULL NULL 1
1 SIMPLE DSA ref PRIMARY PRIMARY 4 const 3 Using where; Using index
-1 SIMPLE DT ALL t_id NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE DT ref t_id t_id 2 test.DSA.t_id 1 Using where
SELECT * FROM t5 DU, t1 D, t4 DT, t2 DSA, t3 DSAR
WHERE DU.dog_id=D.dog_id AND D.dog_id=DT.dog_id AND D.birthday=DT.birthday AND
DT.t_id=DSA.t_id AND DT.birthday=DSA.birthday AND DSA.dog_id=DSAR.dog_id;
@@ -3424,3 +3436,24 @@ COUNT(*)
2
DROP TABLE t1, t2, t3;
# End of 10.5 tests
+#
+# MDEV-30256 Wrong result (missing rows) upon join with empty table
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t3 (c INT PRIMARY KEY);
+EXPLAIN SELECT * FROM t1 LEFT JOIN t2 JOIN t3 ON (t2.b >= t3.c) ON (t1.a < t2.b);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+1 SIMPLE t3 index PRIMARY PRIMARY 4 NULL 0 Using index; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (incremental, BNL join)
+SELECT * FROM t1 LEFT JOIN t2 JOIN t3 ON (t2.b >= t3.c) ON (t1.a < t2.b);
+a b c
+1 NULL NULL
+2 NULL NULL
+DROP TABLE t1,t2,t3;
+#
+# End of 11.0 tests
+#
diff --git a/mysql-test/main/join.test b/mysql-test/main/join.test
index 1668787d33e..453cf8c2313 100644
--- a/mysql-test/main/join.test
+++ b/mysql-test/main/join.test
@@ -253,7 +253,7 @@ CREATE TABLE t2 (
PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1,'s1'),(2,'s2'),(3,'s3'),(4,'s4'),(5,'s5');
-select t1.*, t2.* from t1, t2 where t2.id=t1.t2_id limit 2;
+select straight_join t1.*, t2.* from t2, t1 where t2.id=t1.t2_id limit 2;
drop table t1,t2;
#
@@ -685,7 +685,7 @@ drop table t1, t2, t3;
# BUG#14940 {Wrong query plan is chosen because of odd results of
# prev_record_reads() function }
-create table t1 (a int);
+create table t1 (a int) engine=myisam;
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, primary key(a));
@@ -697,12 +697,13 @@ select 'The cost of accessing t1 (dont care if it changes' '^';
select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z;
+select @@myisam.optimizer_disk_read_ratio;
+set global myisam.optimizer_disk_read_ratio=0;
explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b;
show status like 'Last_query_cost';
-select '^^: The above should be ~= 20 + cost(select * from t1). Value less than 20 is an error' Z;
-
-
-
+select '^^: The above should be ~= 40 + cost(select * from t1). Value less than 40 is an error' Z;
+set global myisam.optimizer_disk_read_ratio=default;
+select @@myisam.optimizer_disk_read_ratio;
drop table t1, t2;
#
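
The hunk above makes the Last_query_cost check deterministic by pinning the new per-engine
cost variable for the duration of the check: with myisam.optimizer_disk_read_ratio set to 0
the optimizer prices MyISAM reads as if every page were already cached. The pattern in
isolation, using only statements that appear in the hunk (the ~40 threshold is simply the
value the test asserts):

  select @@myisam.optimizer_disk_read_ratio;
  set global myisam.optimizer_disk_read_ratio=0;      -- price all MyISAM reads as cached
  explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b;
  show status like 'Last_query_cost';                 -- asserted to be roughly 40 + cost of scanning t1
  set global myisam.optimizer_disk_read_ratio=default;
  select @@myisam.optimizer_disk_read_ratio;          -- confirm the default is restored
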
@@ -957,6 +958,9 @@ INSERT INTO t1 VALUES (3,'b'),(4,NULL),(5,'c'),(6,'cc'),(7,'d'),
(8,'dd'),(9,'e'),(10,'ee');
INSERT INTO t2 VALUES (2,NULL);
ANALYZE TABLE t1,t2;
+# Run EXPLAIN once here so that the statistics tables are read now and are not
+# included in the later Handler_read% counts
+explain SELECT * FROM t1 JOIN t2 ON t1.v = t2.v WHERE t2.v IS NULL ORDER BY 1;
FLUSH STATUS;
SELECT * FROM t1 JOIN t2 ON t1.v = t2.v WHERE t2.v IS NULL ORDER BY 1;
SHOW STATUS LIKE 'Handler_read_%';
@@ -1835,3 +1839,20 @@ SELECT COUNT(*) FROM t1 LEFT JOIN (t2 LEFT JOIN t3 ON t2.b = t3.c) ON t1.a = t2.
DROP TABLE t1, t2, t3;
--echo # End of 10.5 tests
+
+--echo #
+--echo # MDEV-30256 Wrong result (missing rows) upon join with empty table
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t3 (c INT PRIMARY KEY);
+EXPLAIN SELECT * FROM t1 LEFT JOIN t2 JOIN t3 ON (t2.b >= t3.c) ON (t1.a < t2.b);
+SELECT * FROM t1 LEFT JOIN t2 JOIN t3 ON (t2.b >= t3.c) ON (t1.a < t2.b);
+DROP TABLE t1,t2,t3;
+
+--echo #
+--echo # End of 11.0 tests
+--echo #
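
Two of the join.test edits above change the setup rather than what is tested: the first
query gains straight_join with t2 listed first, presumably to keep the join order stable
under the updated cost model, and the BUG#14940 table is pinned to MyISAM, presumably so
the Last_query_cost check stays tied to MyISAM's cost constants. The forced-order form,
as it now reads in the test:

  select straight_join t1.*, t2.* from t2, t1 where t2.id=t1.t2_id limit 2;
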
diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result
index 819eaf48dbb..c02ac192dde 100644
--- a/mysql-test/main/join_cache.result
+++ b/mysql-test/main/join_cache.result
@@ -53,6 +53,7 @@ set join_cache_level=1;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 1
+# Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -60,6 +61,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+# Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -79,6 +81,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -91,6 +94,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+# Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -131,6 +135,7 @@ set join_cache_level=2;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 2
+# join_cache_level 2, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -138,6 +143,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+# join_cache_level 2, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -157,6 +163,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 2, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -169,6 +176,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (incremental, BNL join)
+# join_cache_level 2, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -209,6 +217,7 @@ set join_cache_level=3;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 3
+# join_cache_level 3, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -216,6 +225,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# join_cache_level 3, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
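
In the join_cache_level=3 plans above, City moves from type ALL with a plain BNL buffer to
hash_ALL with a BNLH buffer: the join buffer now carries a hash table keyed on the equi-join
column (#hash#$hj), so each scanned City row is matched against the buffered Country rows by
a hash lookup instead of a scan of the whole buffer. The switch is purely a session setting,
as used throughout this file:

  set join_cache_level=2;   -- flat/incremental BNL buffers only
  EXPLAIN
  SELECT City.Name, Country.Name FROM City,Country
  WHERE City.Country=Country.Code AND
        Country.Name LIKE 'L%' AND City.Population > 100000;
  set join_cache_level=3;   -- adds hashed (BNLH) join buffers
  EXPLAIN
  SELECT City.Name, Country.Name FROM City,Country
  WHERE City.Country=Country.Code AND
        Country.Name LIKE 'L%' AND City.Population > 100000;
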
@@ -235,6 +245,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 3, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -247,6 +258,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# join_cache_level 3, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -287,6 +299,7 @@ set join_cache_level=4;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 4
+# join_cache_level 4, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -294,6 +307,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# join_cache_level 4, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -313,6 +327,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 4, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -325,6 +340,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (incremental, BNLH join)
+# join_cache_level 4, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -361,6 +377,7 @@ Ludwigshafen am Rhein Germany German
Lungtan Taiwan Min
L´Hospitalet de Llobregat Spain Spanish
Lázaro Cárdenas Mexico Spanish
+# join_cache_level 4, Query 5
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
@@ -379,6 +396,7 @@ Canada 31147000 NULL NULL
Cuba 11201000 NULL NULL
Côte d?Ivoire 14786000 NULL NULL
Czech Republic 10278100 NULL NULL
+# join_cache_level 4, Query 6
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND
@@ -404,6 +422,7 @@ Czech Republic 10278100 NULL NULL
CREATE INDEX City_Population ON City(Population);
CREATE INDEX City_Name ON City(Name);
ANALYZE TABLE City;
+# After Analyze, Query 1
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
@@ -412,6 +431,7 @@ WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City hash_range City_Population #hash#$hj:City_Population 3:4 world.Country.Code 24 Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
+# After Analyze, Query 2
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
@@ -430,6 +450,7 @@ Canada 31147000 NULL NULL
Cuba 11201000 NULL NULL
Côte d?Ivoire 14786000 NULL NULL
Czech Republic 10278100 NULL NULL
+# After Analyze, Query 3
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
@@ -439,6 +460,7 @@ WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City hash_index_merge City_Population,City_Name #hash#$hj:City_Population,City_Name 3:4,35 world.Country.Code 96 Using sort_union(City_Population,City_Name); Using where; Using join buffer (flat, BNLH join)
+# After Analyze, Query 4
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND
@@ -471,6 +493,7 @@ join_buffer_size 256
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 1
+# join_cache_level 1, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -478,6 +501,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+# join_cache_level 1, Join_buffer_size, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -497,6 +521,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 1, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -509,6 +534,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+# join_cache_level 1, Join_buffer_size, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -549,6 +575,7 @@ set join_cache_level=2;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 2
+# join_cache_level 2, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -556,6 +583,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+# join_cache_level 2, Join_buffer_size, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -575,6 +603,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 2, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -587,6 +616,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (incremental, BNL join)
+# join_cache_level 2, Join_buffer_size, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -627,6 +657,7 @@ set join_cache_level=3;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 3
+# join_cache_level 3, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -634,6 +665,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# join_cache_level 3, Join_buffer_size, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -653,6 +685,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 3, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -665,6 +698,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# join_cache_level 3, Join_buffer_size, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -705,6 +739,7 @@ set join_cache_level=4;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 4
+# join_cache_level 4, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -712,6 +747,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# join_cache_level 4, Join_buffer_size, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -731,6 +767,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# join_cache_level 4, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -743,6 +780,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (incremental, BNLH join)
+# join_cache_level 4, Join_buffer_size, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -816,6 +854,7 @@ set join_cache_level=3;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 3
+# Part 2, join_cache_level=3, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -823,6 +862,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
1 SIMPLE City hash_ALL Population,Country #hash#Country 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=3, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -842,6 +882,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# Part 2, join_cache_level=3, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -854,6 +895,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (flat, BNLH join); Using rowid filter
+# Part 2, join_cache_level=3, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -890,6 +932,7 @@ Ludwigshafen am Rhein Germany German
Lungtan Taiwan Min
L´Hospitalet de Llobregat Spain Spanish
Lázaro Cárdenas Mexico Spanish
+# Part 2, join_cache_level=3, Query 5
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
@@ -897,6 +940,7 @@ City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
1 PRIMARY City hash_ALL Population,Country #hash#Country 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=3, Query 6
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -916,6 +960,7 @@ Kaunas
Klaipeda
?iauliai
Panevezys
+# Part 2, join_cache_level=3, Query 7
EXPLAIN
SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.Percentage)
FROM Country LEFT JOIN CountryLanguage ON
@@ -925,6 +970,7 @@ Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage hash_ALL PRIMARY #hash#PRIMARY 33 world.Country.Code,const 984 Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=3, Query 8
SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.Percentage)
FROM Country LEFT JOIN CountryLanguage ON
(CountryLanguage.Country=Country.Code AND Language='English')
@@ -1016,6 +1062,7 @@ set join_cache_level=4;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 4
+# Part 2, join_cache_level=4, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
@@ -1023,6 +1070,7 @@ Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
1 SIMPLE City hash_ALL Population,Country #hash#Country 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=4, Query 2
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -1042,6 +1090,7 @@ Tripoli Lebanon
Tripoli Libyan Arab Jamahiriya
Vientiane Laos
Vilnius Lithuania
+# Part 2, join_cache_level=4, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -1054,6 +1103,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE CountryLanguage hash_ALL|filter PRIMARY,Percentage #hash#PRIMARY|Percentage 3|4 world.City.Country 984 (19%) Using where; Using join buffer (incremental, BNLH join); Using rowid filter
+# Part 2, join_cache_level=4, Query 4
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1090,6 +1140,7 @@ Ludwigshafen am Rhein Germany German
Lungtan Taiwan Min
L´Hospitalet de Llobregat Spain Spanish
Lázaro Cárdenas Mexico Spanish
+# Part 2, join_cache_level=4, Query 5
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
@@ -1097,6 +1148,7 @@ City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
1 PRIMARY City hash_ALL Population,Country #hash#Country 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=4, Query 6
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -1116,6 +1168,7 @@ Kaunas
Klaipeda
?iauliai
Panevezys
+# Part 2, join_cache_level=4, Query 7
EXPLAIN
SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.Percentage)
FROM Country LEFT JOIN CountryLanguage ON
@@ -1125,6 +1178,7 @@ Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
1 SIMPLE CountryLanguage hash_ALL PRIMARY #hash#PRIMARY 33 world.Country.Code,const 984 Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=4, Query 8
SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.Percentage)
FROM Country LEFT JOIN CountryLanguage ON
(CountryLanguage.Country=Country.Code AND Language='English')
@@ -1209,6 +1263,7 @@ Belarus NULL
Venezuela NULL
Russian Federation NULL
Vietnam NULL
+# Part 2, join_cache_level=4, Query 9
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
@@ -1217,6 +1272,7 @@ WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range Name Name 52 NULL # Using index condition; Using where; Rowid-ordered scan
1 SIMPLE City hash_range Population,Country #hash#Country:Population 3:4 world.Country.Code # Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=4, Query 10
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
@@ -1236,6 +1292,7 @@ Cuba 11201000 NULL NULL
Côte d?Ivoire 14786000 NULL NULL
Czech Republic 10278100 NULL NULL
CREATE INDEX City_Name ON City(Name);
+# Part 2, join_cache_level=4, City_Name, Query 1
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
@@ -1245,6 +1302,7 @@ WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range Name Name 52 NULL 17 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE City hash_index_merge Population,Country,City_Name #hash#Country:Population,City_Name 3:4,35 world.Country.Code 96 Using sort_union(Population,City_Name); Using where; Using join buffer (flat, BNLH join)
+# Part 2, join_cache_level=4, City_Name, Query 2
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND
@@ -3067,13 +3125,13 @@ t1.metaid = t2.metaid AND t1.affiliateid = '2';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 system PRIMARY NULL NULL NULL 1
1 SIMPLE t5 ref PRIMARY,t5_formattypeid t5_formattypeid 4 const 1
-1 SIMPLE t1 ref t1_affiliateid,t1_metaid t1_affiliateid 4 const 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.metaid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t7 ref PRIMARY PRIMARY 4 test.t1.metaid 1 Using index
+1 SIMPLE t4 ref PRIMARY,t4_formatclassid,t4_formats_idx t4_formatclassid 4 test.t5.formatclassid 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 ref t3_metaid,t3_formatid,t3_metaidformatid t3_formatid 4 test.t4.formatid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t1 ref t1_affiliateid,t1_metaid t1_metaid 4 test.t3.metaid 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t3.metaid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t7 ref PRIMARY PRIMARY 4 test.t3.metaid 1 Using index
1 SIMPLE t8 eq_ref PRIMARY PRIMARY 4 test.t7.artistid 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t3 ref t3_metaid,t3_formatid,t3_metaidformatid t3_metaidformatid 4 test.t1.metaid 1 Using index condition; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t4 eq_ref PRIMARY,t4_formatclassid,t4_formats_idx PRIMARY 4 test.t3.formatid 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t9 index PRIMARY,t9_subgenreid,t9_metaid PRIMARY 8 NULL 2 Using where; Using index; Using join buffer (incremental, BNL join)
+1 SIMPLE t9 ref PRIMARY,t9_subgenreid,t9_metaid t9_metaid 4 test.t3.metaid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t10 eq_ref PRIMARY,t10_genreid PRIMARY 4 test.t9.subgenreid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t10.genreid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT t1.uniquekey, t1.xml AS affiliateXml,
@@ -3215,7 +3273,7 @@ EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref i_a i_a 4 test.t1.a 2 Using where; Not exists
+1 SIMPLE t2 ref i_a i_a 4 test.t1.a 1 Using where; Not exists
SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b IS NULL;
a a b
3 NULL NULL
@@ -3226,7 +3284,7 @@ EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref i_a i_a 4 test.t1.a 2 Using where; Not exists; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref i_a i_a 4 test.t1.a 1 Using where; Not exists; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b IS NULL;
a a b
3 NULL NULL
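
The two plans above only change the rows estimate from 2 to 1; the unchanged "Not exists"
flag is the interesting part: for a LEFT JOIN whose WHERE clause can only be satisfied by
NULL-complemented t2 rows, the server stops probing t2 for a given t1 row as soon as one
match is found, since that t1 row can no longer reach the result. The query shape that
triggers it, exactly as in the hunks:

  EXPLAIN
  SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b IS NULL;
  SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b IS NULL;
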
@@ -3253,7 +3311,7 @@ select t1.a, count(t2.p) as count
from t1 left join t2 on t1.a=t2.a and t2.p % 2 = 1 group by t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 8 Using index; Using temporary; Using filesort
-1 SIMPLE t2 ref i_a i_a 5 test.t1.a 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref i_a i_a 5 test.t1.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
select t1.a, count(t2.p) as count
from t1 left join t2 on t1.a=t2.a and t2.p % 2 = 1 group by t1.a;
a count
@@ -3371,8 +3429,8 @@ set join_cache_level=6;
set join_buffer_size=1024;
EXPLAIN SELECT AVG(c) FROM t1,t2 WHERE t1.a=t2.b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 2050 Using where
-1 SIMPLE t2 ref idx idx 5 test.t1.a 640 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ALL idx NULL NULL NULL 1280 Using where
+1 SIMPLE t1 hash_ALL NULL #hash#$hj 5 test.t2.b 2050 Using where; Using join buffer (flat, BNLH join)
SELECT AVG(c) FROM t1,t2 WHERE t1.a=t2.b;
AVG(c)
5.0000
@@ -4027,7 +4085,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.carrier 1 Using where
-1 SIMPLE t4 ref carrier_id carrier_id 5 test.t3.id 2 Using index
+1 SIMPLE t4 ref carrier_id carrier_id 5 test.t3.id 1 Using index
SET join_cache_level=@save_join_cache_level;
DROP TABLE t1,t2,t3,t4;
#
@@ -4043,7 +4101,7 @@ set join_cache_level = 5;
explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE t2 ref b b 5 test.t1.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref b b 5 test.t1.b 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
a
NULL
@@ -4052,7 +4110,7 @@ set join_cache_level = 8;
explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE t2 ref b b 5 test.t1.b 2 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref b b 5 test.t1.b 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
a
NULL
@@ -4063,7 +4121,7 @@ set join_cache_level = 5;
explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE t2 ref b b 5 test.t1.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref b b 5 test.t1.b 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
a
NULL
@@ -4078,7 +4136,7 @@ set join_cache_level = 5;
explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE t2 ref b b 103 test.t1.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref b b 103 test.t1.b 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
a
NULL
@@ -4087,7 +4145,7 @@ set join_cache_level = 8;
explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE t2 ref b b 103 test.t1.b 2 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref b b 103 test.t1.b 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
a
NULL
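
The level 5 and level 8 hunks above differ only in the rows estimate (2 to 1), but they also
document the buffer naming: join_cache_level=5 gives BKA (Batched Key Access, where the ref
lookups into t2 are batched through the join buffer and executed as key-ordered,
rowid-ordered scans), and join_cache_level=8 gives BKAH, the variant that additionally
hashes the batched keys. As exercised in the test:

  set join_cache_level = 5;   -- flat BKA join buffers
  explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
  set join_cache_level = 8;   -- flat BKAH join buffers (hashed key batches)
  explain SELECT t2.a FROM t1 LEFT JOIN t2 ON t2.b = t1.b;
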
@@ -4471,7 +4529,7 @@ EXPLAIN
SELECT t2.i FROM t1,t2 WHERE t1.cu = t2.cl ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 6
-1 SIMPLE t1 ref cu cu 33 func 2 Using where; Using index
+1 SIMPLE t1 ref cu cu 33 func 1 Using where; Using index
SELECT t2.i FROM t1,t2 WHERE t1.cu = t2.cl ;
i
6
@@ -4804,7 +4862,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t4 ref idx idx 5 test.t1.a1 2 100.00 Using where
+1 SIMPLE t4 ref idx idx 5 test.t1.a1 1 100.00 Using where
1 SIMPLE t5 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a1` AS `a1`,`test`.`t2`.`pk` AS `pk`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`c2` AS `c2`,`test`.`t2`.`d2` AS `d2`,`test`.`t3`.`pk` AS `pk`,`test`.`t3`.`a3` AS `a3`,`test`.`t3`.`c3` AS `c3`,`test`.`t3`.`d3` AS `d3`,`test`.`t4`.`pk` AS `pk`,`test`.`t4`.`a4` AS `a4`,`test`.`t5`.`pk` AS `pk`,`test`.`t5`.`a5` AS `a5` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`d2` = `test`.`t1`.`pk` and `test`.`t3`.`a3` = `test`.`t2`.`c2`) left join `test`.`t4` on(`test`.`t4`.`a4` = `test`.`t1`.`a1` and `test`.`t1`.`a1` is not null) left join `test`.`t5` on(`test`.`t5`.`a5` = `test`.`t3`.`a3`) where 1
@@ -4826,7 +4884,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
-1 SIMPLE t4 ref idx idx 5 test.t1.a1 2 100.00 Using where
+1 SIMPLE t4 ref idx idx 5 test.t1.a1 1 100.00 Using where
1 SIMPLE t5 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a1` AS `a1`,`test`.`t2`.`pk` AS `pk`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`c2` AS `c2`,`test`.`t2`.`d2` AS `d2`,`test`.`t3`.`pk` AS `pk`,`test`.`t3`.`a3` AS `a3`,`test`.`t3`.`c3` AS `c3`,`test`.`t3`.`d3` AS `d3`,`test`.`t4`.`pk` AS `pk`,`test`.`t4`.`a4` AS `a4`,`test`.`t5`.`pk` AS `pk`,`test`.`t5`.`a5` AS `a5` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`d2` = `test`.`t1`.`pk` and `test`.`t3`.`a3` = `test`.`t2`.`c2`) left join `test`.`t4` on(`test`.`t4`.`a4` = `test`.`t1`.`a1` and `test`.`t1`.`a1` is not null) left join `test`.`t5` on(`test`.`t5`.`a5` = `test`.`t3`.`a3`) where 1
@@ -4848,7 +4906,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t4 ref idx idx 5 test.t1.a1 2 100.00 Using where
+1 SIMPLE t4 ref idx idx 5 test.t1.a1 1 100.00 Using where
1 SIMPLE t5 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a1` AS `a1`,`test`.`t2`.`pk` AS `pk`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`c2` AS `c2`,`test`.`t2`.`d2` AS `d2`,`test`.`t3`.`pk` AS `pk`,`test`.`t3`.`a3` AS `a3`,`test`.`t3`.`c3` AS `c3`,`test`.`t3`.`d3` AS `d3`,`test`.`t4`.`pk` AS `pk`,`test`.`t4`.`a4` AS `a4`,`test`.`t5`.`pk` AS `pk`,`test`.`t5`.`a5` AS `a5` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`d2` = `test`.`t1`.`pk` and `test`.`t3`.`a3` = `test`.`t2`.`c2`) left join `test`.`t4` on(`test`.`t4`.`a4` = `test`.`t1`.`a1` and `test`.`t1`.`a1` is not null) left join `test`.`t5` on(`test`.`t5`.`a5` = `test`.`t3`.`a3`) where 1
@@ -4969,8 +5027,8 @@ FROM ((t1 LEFT JOIN (t2 JOIN t3 ON t2.a2 = t3.a3) ON t2.b2 = t1.a1)
LEFT JOIN t4 ON t4.a4 <> 0) LEFT JOIN t5 ON t5.a5 = t2.a2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 1 Using where
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.a2 1 Using index
+1 SIMPLE t3 index PRIMARY PRIMARY 4 NULL 1 Using where; Using index
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t3.a3 1 Using where
1 SIMPLE t4 ALL NULL NULL NULL NULL 1 Using where
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t2.a2 1 Using where
SELECT t4.a4, t5.b5
@@ -5054,6 +5112,8 @@ CREATE TABLE t2 (
f3 int(11), f2 varchar(1024), f4 varchar(10), PRIMARY KEY (f3)
);
INSERT INTO t2 VALUES (6,'RPOYT','y'),(10,'JINQE','m');
+INSERT INTO t2 VALUES (100,'Q','q'),(101,'Q','q'),(102,'Q','q'),(103,'Q','q');
+INSERT INTO t2 VALUES (104,'Q','q'),(105,'Q','q'),(106,'Q','q'),(107,'Q','q');
SET SESSION join_cache_level = 1;
SET SESSION optimizer_switch = 'index_condition_pushdown=off';
EXPLAIN
@@ -5093,8 +5153,8 @@ SET SESSION optimizer_switch = 'index_condition_pushdown=off';
EXPLAIN SELECT * FROM t1,t2
WHERE t2.f3 = t1.f2 AND t1.f1 IN (9, 0, 100) ORDER BY t1.f2 LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range f1,f2 f2 13 NULL 10 Using where
-1 SIMPLE t2 ref f3 f3 67 test.t1.f2 2 Using where; Using index
+1 SIMPLE t1 range f1,f2 f1 5 NULL 3 Using where; Rowid-ordered scan; Using filesort
+1 SIMPLE t2 ref f3 f3 67 test.t1.f2 1 Using where; Using index
SELECT * FROM t1,t2
WHERE t2.f3 = t1.f2 AND t1.f1 IN (9, 0 ,100) ORDER BY t1.f2 LIMIT 1;
f1 f2 f3
@@ -5103,8 +5163,8 @@ SET SESSION optimizer_switch = 'index_condition_pushdown=on';
EXPLAIN SELECT * FROM t1,t2
WHERE t2.f3 = t1.f2 AND t1.f1 IN (9, 0 ,100) ORDER BY t1.f2 LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range f1,f2 f2 13 NULL 10 Using where
-1 SIMPLE t2 ref f3 f3 67 test.t1.f2 2 Using where; Using index
+1 SIMPLE t1 range f1,f2 f1 5 NULL 3 Using index condition; Using where; Rowid-ordered scan; Using filesort
+1 SIMPLE t2 ref f3 f3 67 test.t1.f2 1 Using where; Using index
SELECT * FROM t1,t2
WHERE t2.f3 = t1.f2 AND t1.f1 IN (9, 0 ,100) ORDER BY t1.f2 LIMIT 1;
f1 f2 f3
@@ -5205,7 +5265,7 @@ EXPLAIN
SELECT a FROM t1,t2 WHERE t2.v = t1.v ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
-1 SIMPLE t2 ref idx idx 4 test.t1.v 2
+1 SIMPLE t2 ref idx idx 4 test.t1.v 1
SELECT a FROM t1,t2 WHERE t2.v = t1.v ;
a
11
@@ -5263,7 +5323,7 @@ EXPLAIN
SELECT * FROM t1 WHERE (t1.b) IN (SELECT c FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY t2 ref c c 5 test.t1.b 2 Using index; Start temporary; End temporary
+1 PRIMARY t2 ref c c 5 test.t1.b 1 Using index; Start temporary; End temporary
SELECT * FROM t1 WHERE (t1.b) IN (SELECT c FROM t2);
a b
3914 17
@@ -5443,7 +5503,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t3 system NULL NULL NULL NULL 1
1 PRIMARY t2 range a,c a 5 NULL 2 Using index condition; Using where; Using filesort
-1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary
+1 PRIMARY t4 ref c c 5 test.t2.c 1 Using where; Start temporary; End temporary
SELECT * FROM t1,t2
WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND
t2.a BETWEEN 4 and 5
@@ -5461,7 +5521,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1 Using temporary; Using filesort
1 PRIMARY t3 system NULL NULL NULL NULL 1
1 PRIMARY t2 range a,c a 5 NULL 2 Using index condition; Using where
-1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary
+1 PRIMARY t4 ref c c 5 test.t2.c 1 Using where; Start temporary; End temporary
SELECT * FROM t1,t2
WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND
t2.a BETWEEN 4 and 5
@@ -5480,7 +5540,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1 Using temporary; Using filesort
1 PRIMARY t3 system NULL NULL NULL NULL 1
1 PRIMARY t2 range a,c a 5 NULL 2 Using index condition; Using where
-1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary
+1 PRIMARY t4 ref c c 5 test.t2.c 1 Using where; Start temporary; End temporary
SELECT * FROM t1,t2
WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND
t2.a BETWEEN 4 and 5
@@ -5523,7 +5583,7 @@ and t2.uid=t1.fid;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ref uid uid 5 const 4 Using where; Start temporary
1 PRIMARY t4 eq_ref PRIMARY PRIMARY 4 test.t3.fid 1 Using index
-1 PRIMARY t1 ALL uid NULL NULL NULL 11 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ref uid uid 5 test.t3.fid 1 Using where; End temporary; Using join buffer (flat, BKAH join); Rowid-ordered scan
1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.fid 1 Using join buffer (flat, BKAH join); Rowid-ordered scan
select name from t2, t1
where t1.uid in (select t4.uid from t4, t3 where t3.uid=1 and t4.uid=t3.fid)
@@ -5975,12 +6035,12 @@ f1 f2
EXPLAIN EXTENDED SELECT * FROM temp
WHERE (f1,f2) IN (SELECT t1.i1, t1.v1 FROM (t2 JOIN t1 ON (t1.v1 = t2.v1)));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 1 100.00
-1 PRIMARY temp hash_ALL NULL #hash#$hj 9 test.t1.i1,test.t1.v1 7 100.00 Using where; Using join buffer (flat, BNLH join)
+1 PRIMARY temp ALL NULL NULL NULL NULL 7 100.00
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
2 MATERIALIZED t1 ALL NULL NULL NULL NULL 1 100.00 Using where
2 MATERIALIZED t2 hash_index v1 #hash#v1:v1 4:9 test.t1.v1 10 33.33 Using index; Using join buffer (flat, BNLH join)
Warnings:
-Note 1003 select `test`.`temp`.`f1` AS `f1`,`test`.`temp`.`f2` AS `f2` from `test`.`temp` semi join (`test`.`t2` join `test`.`t1`) where `test`.`temp`.`f1` = `test`.`t1`.`i1` and `test`.`t2`.`v1` = `test`.`t1`.`v1` and `test`.`temp`.`f2` = `test`.`t1`.`v1`
+Note 1003 select `test`.`temp`.`f1` AS `f1`,`test`.`temp`.`f2` AS `f2` from `test`.`temp` semi join (`test`.`t2` join `test`.`t1`) where `test`.`t2`.`v1` = `test`.`t1`.`v1`
DROP TABLE t1,t2,temp;
set join_cache_level=@save_join_cache_level;
#
@@ -6194,6 +6254,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -6203,7 +6264,9 @@ EXPLAIN
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "a.a <= 10",
"using_index": true
@@ -6218,8 +6281,10 @@ EXPLAIN
"key": "kp1",
"key_length": "10",
"used_key_parts": ["kp1", "kp2"],
+ "loops": 10,
"rows": 836,
- "filtered": 76,
+ "cost": "COST_REPLACED",
+ "filtered": 9.090909004,
"index_condition": "b.kp2 <= 10",
"attached_condition": "b.kp2 <= 10 and b.col1 + 1 < 33333"
},
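
The JSON plan above gains per-table "loops" and "cost" members plus a top-level query-block
"cost"; because the absolute numbers depend on the cost model, the test masks them as
COST_REPLACED by sourcing include/explain-no-costs.inc before the statement (see the .test
hunk further down). A sketch of the masked invocation; the full WHERE clause is not shown in
this excerpt, so the join predicate below is an assumption reconstructed from the conditions
visible in the JSON:

  --source include/explain-no-costs.inc
  explain format=json
  select *
  from t1 a, t3 b
  where a.a <= 10 and b.kp1 = a.a           -- assumed join predicate
        and b.kp2 <= 10 and b.col1 + 1 < 33333;
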
diff --git a/mysql-test/main/join_cache.test b/mysql-test/main/join_cache.test
index 125ae84c309..ad2eacbe2bb 100644
--- a/mysql-test/main/join_cache.test
+++ b/mysql-test/main/join_cache.test
@@ -6,6 +6,7 @@
DROP TABLE IF EXISTS t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11;
DROP DATABASE IF EXISTS world;
--enable_warnings
+--source include/have_innodb.inc
set @org_optimizer_switch=@@optimizer_switch;
set @save_join_cache_level=@@join_cache_level;
@@ -53,16 +54,19 @@ set join_cache_level=1;
show variables like 'join_cache_level';
+--echo # Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -72,6 +76,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -84,16 +89,19 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
set join_cache_level=2;
show variables like 'join_cache_level';
+--echo # join_cache_level 2, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 2, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 2, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -103,6 +111,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 2, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -115,16 +124,19 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
set join_cache_level=3;
show variables like 'join_cache_level';
+--echo # join_cache_level 3, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 3, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 3, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -134,7 +146,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
-
+--echo # join_cache_level 3, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -148,16 +160,19 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
set join_cache_level=4;
show variables like 'join_cache_level';
+--echo # join_cache_level 4, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 4, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 4, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -167,6 +182,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 4, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -177,11 +193,13 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 4, Query 5
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
+--echo # join_cache_level 4, Query 6
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND
@@ -195,17 +213,20 @@ CREATE INDEX City_Name ON City(Name);
ANALYZE TABLE City;
--enable_result_log
+--echo # After Analyze, Query 1
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
+--echo # After Analyze, Query 2
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
+--echo # After Analyze, Query 3
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
@@ -213,6 +234,7 @@ SELECT Country.Name, Country.Population, City.Name, City.Population
(City.Population > 5000000 OR City.Name LIKE 'Za%')
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
+--echo # After Analyze, Query 4
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND
@@ -229,16 +251,19 @@ show variables like 'join_buffer_size';
show variables like 'join_cache_level';
+--echo # join_cache_level 1, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 1, Join_buffer_size, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 1, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -248,6 +273,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 1, Join_buffer_size, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -260,16 +286,19 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
set join_cache_level=2;
show variables like 'join_cache_level';
+--echo # join_cache_level 2, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 2, Join_buffer_size, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 2, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -279,6 +308,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 2, Join_buffer_size, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -291,16 +321,19 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
set join_cache_level=3;
show variables like 'join_cache_level';
+--echo # join_cache_level 3, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 3, Join_buffer_size, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 3, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -310,6 +343,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 3, Join_buffer_size, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -322,16 +356,19 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
set join_cache_level=4;
show variables like 'join_cache_level';
+--echo # join_cache_level 4, Join_buffer_size, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 4, Join_buffer_size, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # join_cache_level 4, Join_buffer_size, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -341,6 +378,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # join_cache_level 4, Join_buffer_size, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -374,16 +412,19 @@ show variables like 'join_buffer_size';
set join_cache_level=3;
show variables like 'join_cache_level';
+--echo # Part 2, join_cache_level=3, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # Part 2, join_cache_level=3, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # Part 2, join_cache_level=3, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -393,6 +434,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # Part 2, join_cache_level=3, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -402,15 +444,18 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # Part 2, join_cache_level=3, Query 5
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
+--echo # Part 2, join_cache_level=3, Query 6
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
+--echo # Part 2, join_cache_level=3, Query 7
#enable after fix MDEV-27871
--disable_view_protocol
@@ -421,6 +466,7 @@ SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.P
WHERE
Country.Population > 10000000;
+--echo # Part 2, join_cache_level=3, Query 8
SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.Percentage)
FROM Country LEFT JOIN CountryLanguage ON
(CountryLanguage.Country=Country.Code AND Language='English')
@@ -433,16 +479,19 @@ show variables like 'join_buffer_size';
set join_cache_level=4;
show variables like 'join_cache_level';
+--echo # Part 2, join_cache_level=4, Query 1
EXPLAIN
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # Part 2, join_cache_level=4, Query 2
--sorted_result
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
+--echo # Part 2, join_cache_level=4, Query 3
EXPLAIN
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -452,6 +501,7 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # Part 2, join_cache_level=4, Query 4
--sorted_result
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
@@ -461,15 +511,18 @@ SELECT City.Name, Country.Name, CountryLanguage.Language
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
+--echo # Part 2, join_cache_level=4, Query 5
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
+--echo # Part 2, join_cache_level=4, Query 6
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
+--echo # Part 2, join_cache_level=4, Query 7
#enable after fix MDEV-27871
--disable_view_protocol
@@ -480,6 +533,7 @@ SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.P
WHERE
Country.Population > 10000000;
+--echo # Part 2, join_cache_level=4, Query 8
SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.Percentage)
FROM Country LEFT JOIN CountryLanguage ON
(CountryLanguage.Country=Country.Code AND Language='English')
@@ -487,7 +541,7 @@ SELECT Country.Name, IF(ISNULL(CountryLanguage.Country), NULL, CountryLanguage.P
Country.Population > 10000000;
--enable_view_protocol
-
+--echo # Part 2, join_cache_level=4, Query 9
--replace_column 9 #
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
@@ -495,6 +549,7 @@ SELECT Country.Name, Country.Population, City.Name, City.Population
ON City.Country=Country.Code AND City.Population > 5000000
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
+--echo # Part 2, join_cache_level=4, Query 10
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
@@ -502,6 +557,7 @@ SELECT Country.Name, Country.Population, City.Name, City.Population
CREATE INDEX City_Name ON City(Name);
+--echo # Part 2, join_cache_level=4, City_Name, Query 1
EXPLAIN
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
@@ -509,6 +565,7 @@ SELECT Country.Name, Country.Population, City.Name, City.Population
(City.Population > 5000000 OR City.Name LIKE 'Za%')
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
+--echo # Part 2, join_cache_level=4, City_Name, Query 2
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND
@@ -3206,6 +3263,8 @@ CREATE TABLE t2 (
f3 int(11), f2 varchar(1024), f4 varchar(10), PRIMARY KEY (f3)
);
INSERT INTO t2 VALUES (6,'RPOYT','y'),(10,'JINQE','m');
+INSERT INTO t2 VALUES (100,'Q','q'),(101,'Q','q'),(102,'Q','q'),(103,'Q','q');
+INSERT INTO t2 VALUES (104,'Q','q'),(105,'Q','q'),(106,'Q','q'),(107,'Q','q');
SET SESSION join_cache_level = 1;
@@ -3808,8 +3867,6 @@ drop table t0,t1,t2;
--echo # of LEFT JOIN operations when using join buffer
--echo #
---source include/have_innodb.inc
-
CREATE TABLE t1 (
id int(11) NOT NULL AUTO_INCREMENT,
col1 varchar(255) NOT NULL DEFAULT '',
@@ -4188,6 +4245,7 @@ analyze table t3;
--echo # The following must have "B.col1 + 1 < 33333" attached to table B
--echo # and not to the block-nl-join node:
+--source include/explain-no-costs.inc
explain format=json
select *
from t1 a, t3 b
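The hunk above adds --source include/explain-no-costs.inc (presumably to keep cost figures out of the recorded JSON) in front of an existing check that the condition "B.col1 + 1 < 33333" ends up attached to table B rather than to the block-nl-join node. A minimal sketch of that kind of check, with made-up table names and the assumption that MariaDB's EXPLAIN FORMAT=JSON reports pushed conditions in an "attached_condition" member:

# Hedged sketch, not part of the commit; big_t/small_t are hypothetical.
# The verification consists of looking for "attached_condition" under the
# node for table b instead of under the surrounding "block-nl-join" node.
CREATE TABLE big_t (col1 INT, KEY (col1));
CREATE TABLE small_t (col1 INT);
EXPLAIN FORMAT=JSON
SELECT * FROM big_t a, small_t b
WHERE a.col1 = b.col1 AND b.col1 + 1 < 33333;
DROP TABLE big_t, small_t;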
diff --git a/mysql-test/main/join_nested.result b/mysql-test/main/join_nested.result
index fdfc5a2e7f5..7eaaf85e2bf 100644
--- a/mysql-test/main/join_nested.result
+++ b/mysql-test/main/join_nested.result
@@ -695,21 +695,21 @@ t0.b=t1.b AND
(t9.a=1);
a b a b a b a b a b a b a b a b a b a b
1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1
-1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 1
-1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
-1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 1
-1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
-1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 1
-1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 2
-1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 2
1 2 3 2 4 2 1 2 3 2 2 2 6 2 2 2 0 2 1 2
+1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 1
+1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 2
+1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 2
-1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 2
1 2 3 2 4 2 1 2 4 2 2 2 6 2 2 2 0 2 1 2
+1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 1
+1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 2
+1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 2
-1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 2
1 2 3 2 5 3 NULL NULL NULL NULL 2 2 6 2 2 2 0 2 1 2
+1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 1
+1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 2
+1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 2
SELECT t1.a,t1.b
FROM t1;
@@ -855,7 +855,7 @@ ON t3.a=1 AND t3.b=t2.b AND t2.b=t4.b AND t2.a>0;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t4 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (flat, BNL join)
-1 SIMPLE t2 ref idx_b idx_b 5 test.t3.b 2 100.00 Using where
+1 SIMPLE t2 ref idx_b idx_b 5 test.t3.b 1 100.00 Using where
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b` from `test`.`t3` join `test`.`t4` left join (`test`.`t1` join `test`.`t2`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t3`.`b` and `test`.`t2`.`b` = `test`.`t3`.`b` and `test`.`t2`.`a` > 0 and `test`.`t3`.`b` is not null) where 1
@@ -969,10 +969,10 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where
1 SIMPLE t8 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where
Warnings:
-Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t4`.`b` = `test`.`t3`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
+Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t3`.`b` = `test`.`t4`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
INSERT INTO t8 VALUES (-3,12,0), (-1,14,0), (-5,15,0), (-1,11,0), (-4,13,0);
CREATE INDEX idx_b ON t8(b);
EXPLAIN EXTENDED
@@ -1017,12 +1017,12 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t5 ALL idx_b NULL NULL NULL 7 100.00 Using where
1 SIMPLE t7 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 2 100.00 Using where
+1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 1 100.00 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where
Warnings:
-Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t8`.`a` >= 0 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t4`.`b` = `test`.`t3`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
+Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t8`.`a` >= 0 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t3`.`b` = `test`.`t4`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
INSERT INTO t1 VALUES (-1,133,0), (-2,12,0), (-3,11,0), (-5,15,0);
CREATE INDEX idx_b ON t1(b);
CREATE INDEX idx_a ON t0(a);
@@ -1062,18 +1062,18 @@ t0.b=t1.b AND
(t8.b=t9.b OR t8.c IS NULL) AND
(t9.a=1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t0 ref idx_a idx_a 5 const 2 100.00
+1 SIMPLE t0 ref idx_a idx_a 5 const 2 100.00 Using where
+1 SIMPLE t1 ref idx_b idx_b 5 test.t0.b 1 100.00
1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t1 ALL idx_b NULL NULL NULL 7 100.00 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t5 ALL idx_b NULL NULL NULL 7 100.00 Using where
1 SIMPLE t7 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 2 100.00 Using where
+1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 1 100.00 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where
Warnings:
-Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2 and `test`.`t1`.`a` > 0) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t4`.`b` = `test`.`t3`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
+Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2 and `test`.`t1`.`a` > 0) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t3`.`b` = `test`.`t4`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
SELECT t0.a,t0.b,t1.a,t1.b,t2.a,t2.b,t3.a,t3.b,t4.a,t4.b,
t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b,t9.a,t9.b
FROM t0,t1
@@ -1210,12 +1210,12 @@ EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON c < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
-1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 1 Using index
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
-1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 1 Using index
SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
a b c
NULL 0 0
@@ -1284,8 +1284,8 @@ NULL 2 2
DELETE FROM t3;
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 const c NULL NULL NULL 1 Impossible ON condition
-1 SIMPLE t2 const b NULL NULL NULL 1 Impossible ON condition
+1 SIMPLE t3 const c NULL NULL NULL 0 Impossible ON condition
+1 SIMPLE t2 const b NULL NULL NULL 0 Impossible ON condition
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
a b c
@@ -1743,10 +1743,10 @@ LEFT JOIN
ON t4.carrier = t1.carrier;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index package_id package_id 5 NULL 45 Using where; Using index
+1 SIMPLE t3 ref package_id package_id 5 test.t2.package_id 1 Using index
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.package_id 1
1 SIMPLE t4 eq_ref PRIMARY,id PRIMARY 2 test.t1.carrier 1 Using where
1 SIMPLE t5 ref carrier_id carrier_id 5 test.t4.id 22 Using index
-1 SIMPLE t3 ref package_id package_id 5 test.t2.package_id 1 Using index
SELECT COUNT(*)
FROM ((t2 JOIN t1 ON t2.package_id = t1.id)
JOIN t3 ON t3.package_id = t1.id)
diff --git a/mysql-test/main/join_nested.test b/mysql-test/main/join_nested.test
index ed1fe4c9f7e..ee89c91e734 100644
--- a/mysql-test/main/join_nested.test
+++ b/mysql-test/main/join_nested.test
@@ -364,6 +364,7 @@ SELECT t0.a,t0.b,t1.a,t1.b,t2.a,t2.b,t3.a,t3.b,t4.a,t4.b,
SELECT t9.a,t9.b
FROM t9;
+--sorted_result
SELECT t0.a,t0.b,t1.a,t1.b,t2.a,t2.b,t3.a,t3.b,t4.a,t4.b,
t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b,t9.a,t9.b
FROM t0,t1
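The only change to join_nested.test is a --sorted_result in front of the big ten-table join, so the recorded rows no longer depend on the order the new plans produce them (the reordered result rows in the hunks above). A minimal sketch of the directive, using a hypothetical table:

# Hedged sketch: --sorted_result affects only the next statement and makes
# mysqltest sort that statement's output before comparing it with the
# .result file, so a plan change cannot break the test via row order alone.
CREATE TABLE t_demo (a INT, b INT);
INSERT INTO t_demo VALUES (2,1),(1,2);
--sorted_result
SELECT a, b FROM t_demo;
DROP TABLE t_demo;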
diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result
index 451accd7b1c..d5c46d48e68 100644
--- a/mysql-test/main/join_nested_jcl6.result
+++ b/mysql-test/main/join_nested_jcl6.result
@@ -703,23 +703,23 @@ t0.b=t1.b AND
(t8.b=t9.b OR t8.c IS NULL) AND
(t9.a=1);
a b a b a b a b a b a b a b a b a b a b
-1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 1
-1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 1
+1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1
+1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 2
1 2 3 2 4 2 1 2 3 2 2 2 6 2 2 2 0 2 1 2
-1 2 3 2 4 2 1 2 4 2 2 2 6 2 2 2 0 2 1 2
+1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 1
1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 2
-1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 2
1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
-1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 2
+1 2 3 2 4 2 1 2 4 2 2 2 6 2 2 2 0 2 1 2
+1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 1
+1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 2
+1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 2
-1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 1
1 2 3 2 5 3 NULL NULL NULL NULL 2 2 6 2 2 2 0 2 1 2
+1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 1
1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 2
1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 2
-1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1
-1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 2
SELECT t1.a,t1.b
FROM t1;
a b
@@ -864,7 +864,7 @@ ON t3.a=1 AND t3.b=t2.b AND t2.b=t4.b AND t2.a>0;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t4 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (flat, BNL join)
-1 SIMPLE t2 ref idx_b idx_b 5 test.t3.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref idx_b idx_b 5 test.t3.b 1 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using join buffer (incremental, BNL join)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b` from `test`.`t3` join `test`.`t4` left join (`test`.`t1` join `test`.`t2`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t3`.`b` and `test`.`t2`.`b` = `test`.`t3`.`b` and `test`.`t2`.`a` > 0 and `test`.`t3`.`b` is not null) where 1
@@ -978,10 +978,10 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t8 hash_ALL NULL #hash#$hj 5 test.t5.b 2 100.00 Using where; Using join buffer (incremental, BNLH join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
Warnings:
-Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0 and `test`.`t5`.`b` is not null)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t4`.`b` = `test`.`t3`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
+Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0 and `test`.`t5`.`b` is not null)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t3`.`b` = `test`.`t4`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
INSERT INTO t8 VALUES (-3,12,0), (-1,14,0), (-5,15,0), (-1,11,0), (-4,13,0);
CREATE INDEX idx_b ON t8(b);
EXPLAIN EXTENDED
@@ -1026,12 +1026,12 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t5 ALL idx_b NULL NULL NULL 7 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t7 hash_ALL NULL #hash#$hj 5 test.t5.b 2 100.00 Using where; Using join buffer (incremental, BNLH join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (incremental, BNL join)
-1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 1 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
Warnings:
-Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t8`.`a` >= 0 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0 and `test`.`t5`.`b` is not null)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t4`.`b` = `test`.`t3`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
+Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`a` > 0 and `test`.`t4`.`a` > 0 and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t8`.`a` >= 0 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`a` > 0 and `test`.`t5`.`b` is not null)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t3`.`b` = `test`.`t4`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
INSERT INTO t1 VALUES (-1,133,0), (-2,12,0), (-3,11,0), (-5,15,0);
CREATE INDEX idx_b ON t1(b);
CREATE INDEX idx_a ON t0(a);
@@ -1071,18 +1071,18 @@ t0.b=t1.b AND
(t8.b=t9.b OR t8.c IS NULL) AND
(t9.a=1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t0 ref idx_a idx_a 5 const 2 100.00
-1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t1 ALL idx_b NULL NULL NULL 7 100.00 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t0 ref idx_a idx_a 5 const 2 100.00 Using where
+1 SIMPLE t1 ref idx_b idx_b 5 test.t0.b 1 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t5 ALL idx_b NULL NULL NULL 7 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t7 hash_ALL NULL #hash#$hj 5 test.t5.b 2 100.00 Using where; Using join buffer (incremental, BNLH join)
1 SIMPLE t6 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (incremental, BNL join)
-1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t8 ref idx_b idx_b 5 test.t5.b 1 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 1 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
-1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
Warnings:
-Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`b` is not null)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2 and `test`.`t1`.`a` > 0) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t4`.`b` = `test`.`t3`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
+Note 1003 select `test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t5`.`a` AS `a`,`test`.`t5`.`b` AS `b`,`test`.`t6`.`a` AS `a`,`test`.`t6`.`b` AS `b`,`test`.`t7`.`a` AS `a`,`test`.`t7`.`b` AS `b`,`test`.`t8`.`a` AS `a`,`test`.`t8`.`b` AS `b`,`test`.`t9`.`a` AS `a`,`test`.`t9`.`b` AS `b` from `test`.`t0` join `test`.`t1` left join (`test`.`t2` left join (`test`.`t3` join `test`.`t4`) on(`test`.`t3`.`a` = 1 and `test`.`t4`.`b` = `test`.`t2`.`b` and `test`.`t2`.`b` is not null) join `test`.`t5` left join (`test`.`t6` join `test`.`t7` left join `test`.`t8` on(`test`.`t8`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` < 10 and `test`.`t5`.`b` is not null)) on(`test`.`t7`.`b` = `test`.`t5`.`b` and `test`.`t6`.`b` >= 2 and `test`.`t5`.`b` is not null)) on((`test`.`t3`.`b` = 2 or `test`.`t3`.`c` is null) and (`test`.`t6`.`b` = 2 or `test`.`t6`.`c` is null) and (`test`.`t5`.`b` = `test`.`t0`.`b` or `test`.`t3`.`c` is null or `test`.`t6`.`c` is null or `test`.`t8`.`c` is null) and `test`.`t1`.`a` <> 2 and `test`.`t1`.`a` > 0) join `test`.`t9` where `test`.`t0`.`a` = 1 and `test`.`t1`.`b` = `test`.`t0`.`b` and `test`.`t9`.`a` = 1 and (`test`.`t2`.`a` >= 4 or `test`.`t2`.`c` is null) and (`test`.`t3`.`a` < 5 or `test`.`t3`.`c` is null) and (`test`.`t3`.`b` = `test`.`t4`.`b` or `test`.`t3`.`c` is null or `test`.`t4`.`c` is null) and (`test`.`t5`.`a` >= 2 or `test`.`t5`.`c` is null) and (`test`.`t6`.`a` >= 4 or `test`.`t6`.`c` is null) and (`test`.`t7`.`a` <= 2 or `test`.`t7`.`c` is null) and (`test`.`t8`.`a` < 1 or `test`.`t8`.`c` is null) and (`test`.`t8`.`b` = `test`.`t9`.`b` or `test`.`t8`.`c` is null)
SELECT t0.a,t0.b,t1.a,t1.b,t2.a,t2.b,t3.a,t3.b,t4.a,t4.b,
t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b,t9.a,t9.b
FROM t0,t1
@@ -1219,12 +1219,12 @@ EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON c < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
-1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 1 Using index
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
-1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 1 Using index
SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
a b c
NULL 0 0
@@ -1293,8 +1293,8 @@ NULL 2 2
DELETE FROM t3;
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 const c NULL NULL NULL 1 Impossible ON condition
-1 SIMPLE t2 const b NULL NULL NULL 1 Impossible ON condition
+1 SIMPLE t3 const c NULL NULL NULL 0 Impossible ON condition
+1 SIMPLE t2 const b NULL NULL NULL 0 Impossible ON condition
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
a b c
@@ -1752,10 +1752,10 @@ LEFT JOIN
ON t4.carrier = t1.carrier;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index package_id package_id 5 NULL 45 Using where; Using index
+1 SIMPLE t3 ref package_id package_id 5 test.t2.package_id 1 Using index
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.package_id 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t4 eq_ref PRIMARY,id PRIMARY 2 test.t1.carrier 1 Using where
1 SIMPLE t5 ref carrier_id carrier_id 5 test.t4.id 22 Using index
-1 SIMPLE t3 ref package_id package_id 5 test.t2.package_id 1 Using index
SELECT COUNT(*)
FROM ((t2 JOIN t1 ON t2.package_id = t1.id)
JOIN t3 ON t3.package_id = t1.id)
@@ -2085,9 +2085,9 @@ ON t6.b >= 2 AND t5.b=t7.b AND
(t8.a > 0 OR t8.c IS NULL) AND t6.a>0 AND t7.a>0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t5 ALL NULL NULL NULL NULL 3
-1 SIMPLE t7 range PRIMARY,b_i PRIMARY 4 NULL 2 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
-1 SIMPLE t6 range|filter PRIMARY,b_i PRIMARY|b_i 4|5 NULL 3 (86%) Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join); Using rowid filter
-1 SIMPLE t8 ref b_i b_i 5 test.t5.b 2 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t7 ref|filter PRIMARY,b_i b_i|PRIMARY 5|4 test.t5.b 1 (29%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
+1 SIMPLE t6 range PRIMARY,b_i PRIMARY 4 NULL 3 Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join)
+1 SIMPLE t8 ref b_i b_i 5 test.t5.b 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b
FROM t5
LEFT JOIN
@@ -2120,9 +2120,9 @@ FROM t5 LEFT JOIN
ON (t5.b=t8.b);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t5 ALL NULL NULL NULL NULL 2
-1 SIMPLE t6 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 SIMPLE t6 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t7 const PRIMARY PRIMARY 4 const 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t8 ALL b_i NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t8 ref b_i b_i 5 test.t5.b 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b
FROM t5 LEFT JOIN
(t6 LEFT JOIN t7 ON t7.a=1, t8)
@@ -2137,9 +2137,9 @@ FROM t5 LEFT JOIN
ON (t5.b=t8.b);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t5 ALL NULL NULL NULL NULL 2
-1 SIMPLE t6 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 SIMPLE t6 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t7 ref b_i b_i 5 const 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t8 ALL b_i NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t8 ref b_i b_i 5 test.t5.b 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b
FROM t5 LEFT JOIN
(t6 LEFT JOIN t7 ON t7.b=2, t8)
@@ -2154,7 +2154,7 @@ FROM t5 LEFT JOIN
ON (t5.b=t8.b);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t5 ALL NULL NULL NULL NULL 2
-1 SIMPLE t8 ALL b_i NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t8 ref b_i b_i 5 test.t5.b 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t6 ALL NULL NULL NULL NULL 1 Using join buffer (incremental, BNL join)
1 SIMPLE t7 const PRIMARY PRIMARY 4 const 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b
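join_nested_jcl6 reruns the nested-join tests with join_cache_level raised (the jcl6 suffix), which is why the plans above carry "Using join buffer (flat/incremental, BNL/BNLH/BKA join)" notes; the hunks mostly reorder result rows and lower several ref row estimates from 2 to 1. A short sketch of such a level switch; the level-to-strategy summary in the comment is my reading of the variable, not something stated in this diff:

# Hedged sketch: join_cache_level (0..8 in MariaDB) limits the join
# buffering strategies the optimizer may use; the lowest levels permit only
# BNL buffers, higher levels also permit hashed (BNLH) and batched key
# access (BKA, BKAH) variants, flat or incremental.
SET SESSION join_cache_level = 6;
SHOW VARIABLES LIKE 'join_cache_level';
# ... run the joins under test ...
SET SESSION join_cache_level = DEFAULT;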
diff --git a/mysql-test/main/join_outer.result b/mysql-test/main/join_outer.result
index d6ab8c7dc9c..ce9dbfb8c3d 100644
--- a/mysql-test/main/join_outer.result
+++ b/mysql-test/main/join_outer.result
@@ -1230,7 +1230,7 @@ EXPLAIN
SELECT t1.id, a FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.b IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref idx idx 4 test.t1.id 2 Using where; Not exists
+1 SIMPLE t2 ref idx idx 4 test.t1.id 1 Using where; Not exists
flush status;
SELECT t1.id, a FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.b IS NULL;
id a
@@ -1430,7 +1430,7 @@ WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index
SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1
WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
@@ -1852,7 +1852,7 @@ WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index
SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1
WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
@@ -2080,7 +2080,7 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref idx idx 4 const 2 100.00 Using where
-1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00
+1 SIMPLE t2 ref c c 5 test.t1.a 1 100.00
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t1`.`b` = 5 order by `test`.`t1`.`b`
SELECT t1.b, t2.c, t2.d FROM t2 JOIN t1 ON t2.c = t1.a
@@ -2097,7 +2097,7 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where
-1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00
+1 SIMPLE t2 ref c c 5 test.t1.a 1 100.00
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t1`.`b` = 5 order by `test`.`t1`.`b`
SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
@@ -2260,7 +2260,7 @@ create table t2 (a int, b int, c int, key(b), key(c));
insert into t2 select
@a:=A.a + 10*B.a+100*C.a,
IF(@a<900, NULL, @a),
-IF(@a<500, NULL, @a)
+IF(@a<400, NULL, @a)
from t1 A, t1 B, t1 C;
delete from t1 where a=0;
# Check that there are different #rows of NULLs for b and c, both !=10:
@@ -2269,7 +2269,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref b b 5 const 780 Using index condition
explain select * from t2 force index (c) where c is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c c 5 const 393 Using index condition
+1 SIMPLE t2 ref c c 5 const 282 Using index condition
explain select * from t1 left join t2 on t2.b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
diff --git a/mysql-test/main/join_outer.test b/mysql-test/main/join_outer.test
index 5e1e83e4049..8e4c343f247 100644
--- a/mysql-test/main/join_outer.test
+++ b/mysql-test/main/join_outer.test
@@ -1813,7 +1813,7 @@ create table t2 (a int, b int, c int, key(b), key(c));
insert into t2 select
@a:=A.a + 10*B.a+100*C.a,
IF(@a<900, NULL, @a),
- IF(@a<500, NULL, @a)
+ IF(@a<400, NULL, @a)
from t1 A, t1 B, t1 C;
delete from t1 where a=0;
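The generator above fills t2 from a three-way self-join of t1, so if t1 holds the digits 0..9 then @a runs over 0..999, b is NULL below 900, and c is NULL below the threshold this commit moves from 500 down to 400; the matching .result hunk changes the expected ref estimate for `c is null` from 393 to 282 rows. A hedged way to check the generated NULL counts directly, assuming that 0..9 content of t1:

# Not part of the test: count the NULLs the generator left in t2.
# With t1 = {0..9} the expectation is roughly 900 NULLs in b and 400 in c
# out of 1000 generated rows (the later DELETE only trims t1, not t2).
SELECT SUM(b IS NULL) AS null_b, SUM(c IS NULL) AS null_c, COUNT(*) AS total
FROM t2;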
diff --git a/mysql-test/main/join_outer_innodb.result b/mysql-test/main/join_outer_innodb.result
index 809a980576d..dbbbe89944b 100644
--- a/mysql-test/main/join_outer_innodb.result
+++ b/mysql-test/main/join_outer_innodb.result
@@ -435,47 +435,47 @@ left join t16 on t15.o1 = t16.p1
where t1.a10 = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a4,a6,a5,a7 NULL NULL NULL 3 Using where
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
-1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1 Using index
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where
+1 SIMPLE t10 eq_ref PRIMARY PRIMARY 1 test.t1.a6 1
+1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
-1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
+1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t1.a5 1
1 SIMPLE t12 eq_ref PRIMARY PRIMARY 4 test.t11.k3 1 Using where
1 SIMPLE l2 eq_ref PRIMARY PRIMARY 4 test.t11.k4 1 Using where
1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
-1 SIMPLE t13 ref PRIMARY,m3 m3 8 const,test.t1.a1 1 Using index
-1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where; Using index
-1 SIMPLE m2 ref PRIMARY,m3 m3 8 const,test.t1.a1 1 Using index
+1 SIMPLE t13 ref PRIMARY,m3 m3 8 const,test.t1.a1 3 Using index
+1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where
+1 SIMPLE m2 ref PRIMARY,m3 m3 8 const,test.t1.a1 3 Using index
1 SIMPLE l3 eq_ref PRIMARY PRIMARY 4 test.m2.m2 1 Using where
1 SIMPLE t14 eq_ref PRIMARY PRIMARY 2 test.t1.a8 1 Using where
-1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where; Using index
+1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where
1 SIMPLE t16 ref PRIMARY PRIMARY 2 test.t15.o1 1 Using where
-1 SIMPLE t10 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
explain select * from v1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a4,a6,a5,a7 NULL NULL NULL 3 Using where
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
-1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1 Using index
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where
+1 SIMPLE t10 eq_ref PRIMARY PRIMARY 1 test.t1.a6 1
+1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1
1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
-1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
+1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where
1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t1.a5 1
1 SIMPLE t12 eq_ref PRIMARY PRIMARY 4 test.t11.k3 1 Using where
1 SIMPLE l2 eq_ref PRIMARY PRIMARY 4 test.t11.k4 1 Using where
1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
-1 SIMPLE t13 ref PRIMARY,m3 m3 8 const,test.t1.a1 1 Using index
-1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where; Using index
-1 SIMPLE m2 ref PRIMARY,m3 m3 8 const,test.t1.a1 1 Using index
+1 SIMPLE t13 ref PRIMARY,m3 m3 8 const,test.t1.a1 3 Using index
+1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where
+1 SIMPLE m2 ref PRIMARY,m3 m3 8 const,test.t1.a1 3 Using index
1 SIMPLE l3 eq_ref PRIMARY PRIMARY 4 test.m2.m2 1 Using where
1 SIMPLE t14 eq_ref PRIMARY PRIMARY 2 test.t1.a8 1 Using where
-1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where; Using index
+1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where
1 SIMPLE t16 ref PRIMARY PRIMARY 2 test.t15.o1 1 Using where
-1 SIMPLE t10 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
drop view v1;
drop table t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15,t16;
#
diff --git a/mysql-test/main/join_outer_jcl6.result b/mysql-test/main/join_outer_jcl6.result
index 8dcc41638bb..ff5e76b78ad 100644
--- a/mysql-test/main/join_outer_jcl6.result
+++ b/mysql-test/main/join_outer_jcl6.result
@@ -1237,7 +1237,7 @@ EXPLAIN
SELECT t1.id, a FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.b IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref idx idx 4 test.t1.id 2 Using where; Not exists; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref idx idx 4 test.t1.id 1 Using where; Not exists; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
flush status;
SELECT t1.id, a FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.b IS NULL;
id a
@@ -1437,7 +1437,7 @@ WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index
SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1
WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
@@ -1859,7 +1859,7 @@ WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system PRIMARY NULL NULL NULL 1
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index
SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1
WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL
GROUP BY t2.f1, t2.f2;
@@ -2087,7 +2087,7 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref idx idx 4 const 2 100.00 Using where
-1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref c c 5 test.t1.a 1 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t1`.`b` = 5 order by `test`.`t1`.`b`
SELECT t1.b, t2.c, t2.d FROM t2 JOIN t1 ON t2.c = t1.a
@@ -2104,7 +2104,7 @@ WHERE t1.pk BETWEEN 5 AND 6 AND t1.b IS NULL OR t1.b = 5
ORDER BY t1.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ref PRIMARY,idx idx 4 const 2 100.00 Using where
-1 SIMPLE t2 ref c c 5 test.t1.a 2 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ref c c 5 test.t1.a 1 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` join `test`.`t1` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t1`.`b` = 5 order by `test`.`t1`.`b`
SELECT t1.b, t2.c, t2.d FROM t2 LEFT JOIN t1 ON t2.c = t1.a
@@ -2267,7 +2267,7 @@ create table t2 (a int, b int, c int, key(b), key(c));
insert into t2 select
@a:=A.a + 10*B.a+100*C.a,
IF(@a<900, NULL, @a),
-IF(@a<500, NULL, @a)
+IF(@a<400, NULL, @a)
from t1 A, t1 B, t1 C;
delete from t1 where a=0;
# Check that there are different #rows of NULLs for b and c, both !=10:
@@ -2276,7 +2276,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref b b 5 const 780 Using index condition
explain select * from t2 force index (c) where c is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c c 5 const 393 Using index condition
+1 SIMPLE t2 ref c c 5 const 282 Using index condition
explain select * from t1 left join t2 on t2.b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 9
diff --git a/mysql-test/main/key.result b/mysql-test/main/key.result
index 2e2c8d894f0..e6bb46af2b1 100644
--- a/mysql-test/main/key.result
+++ b/mysql-test/main/key.result
@@ -231,11 +231,14 @@ numeropost
1
EXPLAIN SELECT numeropost FROM t1 WHERE numreponse='1';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 const numreponse numreponse 4 const 1 Using index
+1 SIMPLE t1 const numreponse numreponse 4 const 1
FLUSH TABLES;
SELECT numeropost FROM t1 WHERE numreponse='1';
numeropost
1
+EXPLAIN SELECT numreponse+0 FROM t1 WHERE numreponse='1';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const numreponse numreponse 4 const 1 Using index
drop table t1;
create table t1 (c varchar(30) character set utf8, t text character set utf8, unique (c(2)), unique (t(3))) engine=myisam;
show create table t1;
@@ -610,7 +613,7 @@ EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t1 range a a 5 NULL 5 Using where; Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
SELECT 1 as RES FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
RES
@@ -628,19 +631,19 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SHOW STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 9.212184
+Last_query_cost 0.014784
EXPLAIN SELECT a, SUM( b ) FROM t1 USE INDEX( a ) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SHOW STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 9.212184
+Last_query_cost 0.014784
EXPLAIN SELECT a, SUM( b ) FROM t1 FORCE INDEX( a ) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 6
SHOW STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 14.199000
+Last_query_cost 0.014784
DROP TABLE t1;
#
# MDEV-21480: Unique key using ref access though eq_ref access can be used
@@ -689,3 +692,42 @@ drop table t1,t2;
#
create table t1 (a int, b int, key(a), key(a desc));
drop table t1;
+# Check some issues with FORCE INDEX and full index scans
+# (Does FORCE INDEX force an index scan)
+#
+create table t1 (a int primary key, b int, c int, d int,
+key k1 (b) using BTREE, key k2 (c,d) using btree) engine=heap;
+insert into t1 select seq as a, seq as b, seq as c, seq as d
+from seq_1_to_100;
+explain select sum(a+b) from t1 force index (k1) where b>0 and a=99;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range k1 k1 5 NULL 100 Using where
+explain select sum(a+b) from t1 force index (k1) where a>0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100 Using where
+explain select sum(a+b) from t1 force index (k1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+explain select sum(a+b) from t1 force index for join (k1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+explain select sum(a+b) from t1 force index for order by (k1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+explain select sum(a+b) from t1 force index (k1,k2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+select sum(a+b) from t1 force index (k1);
+sum(a+b)
+10100
+explain select sum(a+b) from t1 force index (primary);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+select sum(a+b) from t1 force index (primary);
+sum(a+b)
+10100
+explain select straight_join sum(a+b) from seq_1_to_10 as s, t1 force index (k2) where t1.a=s.seq;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE s index PRIMARY PRIMARY 8 NULL 10 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 100 Using where; Using join buffer (flat, BNL join)
+drop table t1;
diff --git a/mysql-test/main/key.test b/mysql-test/main/key.test
index 29e08b8834a..7ec18ad5d0a 100644
--- a/mysql-test/main/key.test
+++ b/mysql-test/main/key.test
@@ -227,9 +227,12 @@ drop table t1;
CREATE TABLE t1 (numeropost mediumint(8) unsigned NOT NULL default '0', numreponse int(10) unsigned NOT NULL auto_increment, PRIMARY KEY (numeropost,numreponse), UNIQUE KEY numreponse (numreponse));
INSERT INTO t1 (numeropost,numreponse) VALUES ('1','1'),('1','2'),('2','3'),('2','4');
SELECT numeropost FROM t1 WHERE numreponse='1';
+# No 'Using index'
EXPLAIN SELECT numeropost FROM t1 WHERE numreponse='1';
FLUSH TABLES;
SELECT numeropost FROM t1 WHERE numreponse='1';
+# This one will have 'Using index'
+EXPLAIN SELECT numreponse+0 FROM t1 WHERE numreponse='1';
drop table t1;
#
@@ -608,3 +611,23 @@ drop table t1,t2;
--echo #
create table t1 (a int, b int, key(a), key(a desc));
drop table t1;
+
+--echo # Check some issues with FORCE INDEX and full index scans
+--echo # (Does FORCE INDEX force an index scan)
+--echo #
+
+create table t1 (a int primary key, b int, c int, d int,
+key k1 (b) using BTREE, key k2 (c,d) using btree) engine=heap;
+insert into t1 select seq as a, seq as b, seq as c, seq as d
+from seq_1_to_100;
+explain select sum(a+b) from t1 force index (k1) where b>0 and a=99;
+explain select sum(a+b) from t1 force index (k1) where a>0;
+explain select sum(a+b) from t1 force index (k1);
+explain select sum(a+b) from t1 force index for join (k1);
+explain select sum(a+b) from t1 force index for order by (k1);
+explain select sum(a+b) from t1 force index (k1,k2);
+select sum(a+b) from t1 force index (k1);
+explain select sum(a+b) from t1 force index (primary);
+select sum(a+b) from t1 force index (primary);
+explain select straight_join sum(a+b) from seq_1_to_10 as s, t1 force index (k2) where t1.a=s.seq;
+drop table t1;
diff --git a/mysql-test/main/key_cache.result b/mysql-test/main/key_cache.result
index 4a5df2da65d..0ac03750081 100644
--- a/mysql-test/main/key_cache.result
+++ b/mysql-test/main/key_cache.result
@@ -134,7 +134,7 @@ i
explain select count(*) from t1, t2 where t1.p = t2.i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 2 Using index
-1 SIMPLE t2 ref k1 k1 5 test.t1.p 2 Using index
+1 SIMPLE t2 ref k1 k1 5 test.t1.p 1 Using index
select count(*) from t1, t2 where t1.p = t2.i;
count(*)
3
@@ -434,31 +434,31 @@ p i a
3 1 yyyy
4 3 zzzz
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
VARIABLE_NAME VARIABLE_VALUE
KEY_BLOCKS_NOT_FLUSHED 0
KEY_BLOCKS_USED 4
KEY_BLOCKS_WARM 0
-KEY_READ_REQUESTS 22
+KEY_READ_REQUESTS 21
KEY_READS 0
KEY_WRITE_REQUESTS 26
KEY_WRITES 6
select variable_value into @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default NULL NULL 2097152 1024 4 # 0 22 0 26 6
+default NULL NULL 2097152 1024 4 # 0 21 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t2 where a='zzzz';
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default NULL NULL 2097152 1024 4 # 0 29 0 32 9
+default NULL NULL 2097152 1024 4 # 0 28 0 32 9
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t1;
delete from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default NULL NULL 2097152 1024 4 # 0 29 0 32 9
+default NULL NULL 2097152 1024 4 # 0 28 0 32 9
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
set global key_cache_segments=2;
select @@key_cache_segments;
@@ -482,13 +482,13 @@ p i a
3 1 yyyy
4 3 zzzz
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
VARIABLE_NAME VARIABLE_VALUE
KEY_BLOCKS_NOT_FLUSHED 0
KEY_BLOCKS_USED 4
KEY_BLOCKS_WARM 0
-KEY_READ_REQUESTS 22
+KEY_READ_REQUESTS 21
KEY_READS 0
KEY_WRITE_REQUESTS 26
KEY_WRITES 6
@@ -497,13 +497,13 @@ variable_value < @key_blocks_unused
1
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 2097152 1024 4 # 0 22 0 26 6
+default 2 NULL 2097152 1024 4 # 0 21 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t1;
delete from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 2097152 1024 4 # 0 22 0 26 6
+default 2 NULL 2097152 1024 4 # 0 21 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
set global key_cache_segments=1;
select @@key_cache_segments;
@@ -527,13 +527,13 @@ p i a
3 1 yyyy
4 3 zzzz
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
VARIABLE_NAME VARIABLE_VALUE
KEY_BLOCKS_NOT_FLUSHED 0
KEY_BLOCKS_USED 4
KEY_BLOCKS_WARM 0
-KEY_READ_REQUESTS 22
+KEY_READ_REQUESTS 21
KEY_READS 0
KEY_WRITE_REQUESTS 26
KEY_WRITES 6
@@ -542,13 +542,13 @@ variable_value = @key_blocks_unused
1
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 1 NULL 2097152 1024 4 # 0 22 0 26 6
+default 1 NULL 2097152 1024 4 # 0 21 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t1;
delete from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 1 NULL 2097152 1024 4 # 0 22 0 26 6
+default 1 NULL 2097152 1024 4 # 0 21 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
flush tables;
flush status;
@@ -583,10 +583,10 @@ p i a
3 1 yyyy
4 3 zzzz
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 4 # 0 22 0 26 6
+default 2 NULL 32768 1024 4 # 0 21 0 26 6
small NULL NULL 1048576 1024 1 # 0 0 0 0 0
insert into t1(a) select a from t1;
insert into t1(a) select a from t1;
@@ -606,7 +606,7 @@ insert into t2(i,a) select i,a from t2;
insert into t2(i,a) select i,a from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 6733 # 3684 103
+default 2 NULL 32768 1024 # # 0 6732 # 3684 103
small NULL NULL 1048576 1024 # # 0 0 # 0 0
select * from t1 where p between 1010 and 1020 ;
p a
@@ -625,12 +625,16 @@ p i a
1020 3 zzzz
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 6750 # 3684 103
+default 2 NULL 32768 1024 # # 0 6749 # 3684 103
small NULL NULL 1048576 1024 # # 0 0 # 0 0
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
flush tables;
flush status;
update t1 set a='zzzz' where a='qqqq';
-update t2 set i=1 where i=2;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=1 where i=2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
default 2 NULL 32768 1024 # # 0 3076 18 1552 18
diff --git a/mysql-test/main/key_cache.test b/mysql-test/main/key_cache.test
index 9bd57f017f0..baca5c07fec 100644
--- a/mysql-test/main/key_cache.test
+++ b/mysql-test/main/key_cache.test
@@ -299,7 +299,7 @@ insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
select * from t1;
select * from t2;
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
select variable_value into @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
@@ -331,8 +331,7 @@ insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
select * from t1;
select * from t2;
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
-
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
select variable_value < @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
@@ -357,7 +356,7 @@ insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
select * from t1;
select * from t2;
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
select * from information_schema.session_status where variable_name like 'key_%' and variable_name != 'Key_blocks_unused';
select variable_value = @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
@@ -389,7 +388,7 @@ insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'),
select * from t1;
select * from t2;
update t1 set p=3 where p=1;
-update t2 set i=2 where i=1;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=2 where i=1;
--replace_column 7 #
select * from information_schema.key_caches where segment_number is null;
@@ -422,9 +421,10 @@ select * from t2 where p between 1010 and 1020 ;
--replace_column 6 # 7 # 10 #
select * from information_schema.key_caches where segment_number is null;
+analyze table t2;
flush tables; flush status;
update t1 set a='zzzz' where a='qqqq';
-update t2 set i=1 where i=2;
+set statement optimizer_scan_setup_cost=0 for update t2 set i=1 where i=2;
--replace_column 6 # 7 #
select * from information_schema.key_caches where segment_number is null;
diff --git a/mysql-test/main/key_diff.result b/mysql-test/main/key_diff.result
index af928fcb203..f419be55208 100644
--- a/mysql-test/main/key_diff.result
+++ b/mysql-test/main/key_diff.result
@@ -36,7 +36,7 @@ a a a a
explain select t1.*,t2.* from t1,t1 as t2 where t1.A=t2.B;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a NULL NULL NULL 5
-1 SIMPLE t2 ALL b NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref b b 4 test.t1.a 1 Using index condition
select t1.*,t2.* from t1,t1 as t2 where t1.A=t2.B order by binary t1.a,t2.a;
a b a b
A B a a
diff --git a/mysql-test/main/limit_rows_examined.result b/mysql-test/main/limit_rows_examined.result
index f0a22b8f3f2..9d3d5bbf0ab 100644
--- a/mysql-test/main/limit_rows_examined.result
+++ b/mysql-test/main/limit_rows_examined.result
@@ -211,45 +211,42 @@ explain
select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 11);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 2 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 11);
c1
bb
-Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (11). The query result may be incomplete
+cc
+dd
explain
select * from t1
where c1 IN (select * from t2 where c2 > ' ')
LIMIT ROWS EXAMINED 11;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 2 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
select * from t1
where c1 IN (select * from t2 where c2 > ' ')
LIMIT ROWS EXAMINED 11;
c1
bb
-Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (11). The query result may be incomplete
+cc
+dd
explain
select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 0)
LIMIT ROWS EXAMINED 11;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 2 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 0)
LIMIT ROWS EXAMINED 11;
c1
bb
-Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (11). The query result may be incomplete
+cc
+dd
explain
select * from t1i
where c1 IN (select * from t2i where c2 > ' ')
@@ -416,24 +413,22 @@ c1
bb
cc
dd
-select * from v1 LIMIT ROWS EXAMINED 17;
+select * from v1 LIMIT ROWS EXAMINED 10;
c1
bb
cc
dd
-Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 18 rows, which exceeds LIMIT ROWS EXAMINED (17). The query result may be incomplete
-select * from v1 LIMIT ROWS EXAMINED 16;
+select * from v1 LIMIT ROWS EXAMINED 8;
c1
bb
cc
+dd
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 17 rows, which exceeds LIMIT ROWS EXAMINED (16). The query result may be incomplete
-select * from v1 LIMIT ROWS EXAMINED 11;
+Warning 1931 Query execution was interrupted. The query examined at least 9 rows, which exceeds LIMIT ROWS EXAMINED (8). The query result may be incomplete
+select * from v1 LIMIT ROWS EXAMINED 3;
c1
-bb
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (11). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 4 rows, which exceeds LIMIT ROWS EXAMINED (3). The query result may be incomplete
drop view v1;
explain
select *
@@ -441,17 +436,16 @@ from (select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 0)) as tmp
LIMIT ROWS EXAMINED 11;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 2 func 1
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
select *
from (select * from t1
where c1 IN (select * from t2 where c2 > ' ' LIMIT ROWS EXAMINED 0)) as tmp
LIMIT ROWS EXAMINED 11;
c1
bb
-Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (11). The query result may be incomplete
+cc
+dd
=========================================================================
Aggregation
=========================================================================
diff --git a/mysql-test/main/limit_rows_examined.test b/mysql-test/main/limit_rows_examined.test
index 2315580410f..512058e1eb6 100644
--- a/mysql-test/main/limit_rows_examined.test
+++ b/mysql-test/main/limit_rows_examined.test
@@ -277,9 +277,9 @@ create view v1 as
select * from t1 where c1 IN (select * from t2 where c2 > ' ');
select * from v1;
-select * from v1 LIMIT ROWS EXAMINED 17;
-select * from v1 LIMIT ROWS EXAMINED 16;
-select * from v1 LIMIT ROWS EXAMINED 11;
+select * from v1 LIMIT ROWS EXAMINED 10;
+select * from v1 LIMIT ROWS EXAMINED 8;
+select * from v1 LIMIT ROWS EXAMINED 3;
drop view v1;
diff --git a/mysql-test/main/lock_sync-master.opt b/mysql-test/main/lock_sync-master.opt
index a6700b8d18e..96f0ce3f36c 100644
--- a/mysql-test/main/lock_sync-master.opt
+++ b/mysql-test/main/lock_sync-master.opt
@@ -1,2 +1 @@
--default-storage-engine=MyISAM
---innodb-defragment=0
diff --git a/mysql-test/main/lock_sync.result b/mysql-test/main/lock_sync.result
index 2d7a175e420..af3fbe8a784 100644
--- a/mysql-test/main/lock_sync.result
+++ b/mysql-test/main/lock_sync.result
@@ -67,8 +67,6 @@ declare j int;
select i from t1 where i = 1 into j;
return j;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f2() returns int
begin
declare k int;
@@ -76,8 +74,6 @@ select i from t1 where i = 1 into k;
insert into t2 values (k + 5);
return 0;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f3() returns int
begin
return (select i from t1 where i = 3);
@@ -101,16 +97,12 @@ declare k int;
select i from v1 where i = 1 into k;
return k;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f7() returns int
begin
declare k int;
select j from v2 where j = 1 into k;
return k;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f8() returns int
begin
declare k int;
@@ -118,8 +110,6 @@ select i from v1 where i = 1 into k;
insert into t2 values (k+5);
return k;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f9() returns int
begin
update v2 set j=j+10 where j=1;
@@ -149,8 +139,6 @@ create procedure p2(inout p int)
begin
select i from t1 where i = 1 into p;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f14() returns int
begin
declare k int;
@@ -178,8 +166,6 @@ select i from t1 where i = 1 into j;
call p3;
return 1;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure p3()
begin
create temporary table if not exists temp1 (a int);
@@ -192,8 +178,6 @@ declare k int;
select i from t1 where i=1 into k;
set new.l= k+1;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create trigger t4_bu before update on t4 for each row
begin
if (select i from t1 where i=1) then
diff --git a/mysql-test/main/locking_clause.result b/mysql-test/main/locking_clause.result
index d2ebfe4799f..8cfdfb91037 100644
--- a/mysql-test/main/locking_clause.result
+++ b/mysql-test/main/locking_clause.result
@@ -149,16 +149,8 @@ DROP USER test2@localhost;
# MYSQL 8
#
SELECT 1 FROM DUAL LIMIT 1 INTO @var FOR UPDATE;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT 1 FROM DUAL LIMIT 1 FOR UPDATE INTO @var;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT 1 FROM DUAL LIMIT 1 INTO @var FOR UPDATE INTO @var;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INTO @var' at line 1
SELECT 1 UNION SELECT 1 FOR UPDATE INTO @var;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT 1 UNION SELECT 1 INTO @var FOR UPDATE;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
diff --git a/mysql-test/main/log_slow_debug.result b/mysql-test/main/log_slow_debug.result
index 16d67d7fdc7..eaf38425f8b 100644
--- a/mysql-test/main/log_slow_debug.result
+++ b/mysql-test/main/log_slow_debug.result
@@ -6,6 +6,8 @@ SET @@GLOBAL.log_output='TABLE';
FLUSH SLOW LOGS;
SET @@GLOBAL.slow_query_log=ON;
SET @@GLOBAL.log_slow_admin_statements=ON;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET @saved_dbug = @@debug_dbug;
SET SESSION debug_dbug="+d,simulate_slow_query";
CREATE PROCEDURE show_slow_log()
@@ -19,6 +21,8 @@ $$
# Expect all admin statements in the slow log (ON,DEFAULT)
#
SET @@SESSION.log_slow_admin_statements=ON;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET log_slow_filter=DEFAULT;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
@@ -66,6 +70,8 @@ sql_text
# Expect all admin statements in the slow log (ON,admin)
#
SET @@SESSION.log_slow_admin_statements=ON;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET log_slow_filter=admin;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
@@ -100,6 +106,8 @@ sql_text
# Expect none of admin DDL statements in the slow log (ON,filesort)
#
SET @@SESSION.log_slow_admin_statements=ON;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET log_slow_filter=filesort;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
@@ -127,6 +135,8 @@ sql_text
# Expect none of admin statements in the slow log (OFF,DEFAULT)
#
SET @@SESSION.log_slow_admin_statements=OFF;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET log_slow_filter=DEFAULT;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
@@ -171,7 +181,11 @@ sql_text
# prevent enabling globally suppressed logging by setting the session variable to ON.
#
SET @@GLOBAL.log_slow_admin_statements=OFF;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET @@SESSION.log_slow_admin_statements=ON;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
SET log_slow_filter=DEFAULT;
TRUNCATE TABLE mysql.slow_log;
CREATE TABLE t1 (a INT);
@@ -223,4 +237,6 @@ TRUNCATE mysql.slow_log;
SET @@global.slow_query_log= @org_slow_query_log;
SET @@global.log_output= @org_log_output;
SET @@global.log_slow_admin_statements= @org_log_slow_admin_statements;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
DROP PROCEDURE show_slow_log;
diff --git a/mysql-test/main/log_tables_upgrade.test b/mysql-test/main/log_tables_upgrade.test
index 36802fb38ad..0b69c062d77 100644
--- a/mysql-test/main/log_tables_upgrade.test
+++ b/mysql-test/main/log_tables_upgrade.test
@@ -24,5 +24,5 @@ RENAME TABLE test.bug49823 TO general_log;
DROP TABLE general_log;
RENAME TABLE renamed_general_log TO general_log;
SET GLOBAL general_log = @saved_general_log;
-remove_file $MYSQLD_DATADIR/mysql_upgrade_info;
+remove_file $MYSQLD_DATADIR/mariadb_upgrade_info;
USE test;
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index 7f8110a283e..664c9e82ec6 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -1195,20 +1195,20 @@ ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'IN/ALL/ANY subquery'
select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
DB_ROW_HASH_1 DB_ROW_HASH_2
11 1
-22 2
-33 3
-44 4
11 1
-22 2
-33 3
-44 4
11 1
-22 2
-33 3
-44 4
11 1
22 2
+22 2
+22 2
+22 2
+33 3
+33 3
33 3
+33 3
+44 4
+44 4
+44 4
44 4
select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
DB_ROW_HASH_1 DB_ROW_HASH_2
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
index fa49b6a5ad4..3203675a7ad 100644
--- a/mysql-test/main/long_unique.test
+++ b/mysql-test/main/long_unique.test
@@ -389,6 +389,7 @@ select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
--error ER_BAD_FIELD_ERROR
select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+--sorted_result
select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
--error ER_BAD_FIELD_ERROR
diff --git a/mysql-test/main/lowercase_fs_off.test b/mysql-test/main/lowercase_fs_off.test
index ab46d2ac650..172566a1365 100644
--- a/mysql-test/main/lowercase_fs_off.test
+++ b/mysql-test/main/lowercase_fs_off.test
@@ -128,7 +128,7 @@ show triggers like '%T1%';
drop table t1;
let $datadir= `select @@datadir`;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
set GLOBAL sql_mode=default;
diff --git a/mysql-test/main/mdev-25830.result b/mysql-test/main/mdev-25830.result
index e62d1ff3f55..6de3e5836d3 100644
--- a/mysql-test/main/mdev-25830.result
+++ b/mysql-test/main/mdev-25830.result
@@ -25,8 +25,8 @@ ORDER BY sysapproval_approver0.`order`
LIMIT 0, 50 ;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE task2 range PRIMARY,sys_class_name_2,sys_domain_path PRIMARY 96 NULL 1 0.00 100.00 100.00 Using where; Using temporary; Using filesort
-1 SIMPLE task1 ref PRIMARY,task_parent,sys_class_name_2,sys_domain_path task_parent 99 test.task2.sys_id 1 NULL 100.00 NULL Using index condition; Using where
-1 SIMPLE sysapproval_approver0 ref sysapproval_approver_ref5,sys_domain_path,sysapproval_approver_CHG1975376 sysapproval_approver_ref5 99 test.task1.sys_id 1 NULL 100.00 NULL Using index condition; Using where
+1 SIMPLE task1 ref PRIMARY,task_parent,sys_class_name_2,sys_domain_path task_parent 99 test.task2.sys_id 2 NULL 100.00 NULL Using index condition; Using where
+1 SIMPLE sysapproval_approver0 ref sysapproval_approver_ref5,sys_domain_path,sysapproval_approver_CHG1975376 sysapproval_approver_ref5 99 test.task1.sys_id 3 NULL 100.00 NULL Using index condition; Using where
set optimizer_use_condition_selectivity=4;
analyze SELECT sysapproval_approver0.`sys_id`
FROM ((sysapproval_approver sysapproval_approver0
@@ -47,9 +47,9 @@ WHERE task2.`sys_id` LIKE '8e7792a7dbfffb00fff8a345ca961934%'
ORDER BY sysapproval_approver0.`order`
LIMIT 0, 50 ;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE task2 range PRIMARY,sys_class_name_2,sys_domain_path PRIMARY 96 NULL 1 0.00 98.00 100.00 Using where; Using temporary; Using filesort
-1 SIMPLE task1 ref PRIMARY,task_parent,sys_class_name_2,sys_domain_path task_parent 99 test.task2.sys_id 1 NULL 100.00 NULL Using index condition; Using where
-1 SIMPLE sysapproval_approver0 ref sysapproval_approver_ref5,sys_domain_path,sysapproval_approver_CHG1975376 sysapproval_approver_ref5 99 test.task1.sys_id 1 NULL 100.00 NULL Using index condition; Using where
+1 SIMPLE task2 range PRIMARY,sys_class_name_2,sys_domain_path PRIMARY 96 NULL 1 0.00 100.00 100.00 Using where; Using temporary; Using filesort
+1 SIMPLE task1 ref PRIMARY,task_parent,sys_class_name_2,sys_domain_path task_parent 99 test.task2.sys_id 2 NULL 100.00 NULL Using index condition; Using where
+1 SIMPLE sysapproval_approver0 ref sysapproval_approver_ref5,sys_domain_path,sysapproval_approver_CHG1975376 sysapproval_approver_ref5 99 test.task1.sys_id 3 NULL 100.00 NULL Using index condition; Using where
drop table sysapproval_approver,task;
set global innodb_stats_persistent= @innodb_stats_persistent_save;
set global innodb_stats_persistent_sample_pages=
diff --git a/mysql-test/main/merge.result b/mysql-test/main/merge.result
index 230fcf48e9d..1eecb3e34ca 100644
--- a/mysql-test/main/merge.result
+++ b/mysql-test/main/merge.result
@@ -2804,8 +2804,6 @@ CREATE TABLE tm1 (c1 INT) ENGINE=MRG_MYISAM UNION=(t1)
INSERT_METHOD=LAST;
CREATE TRIGGER tm1_ai AFTER INSERT ON tm1
FOR EACH ROW SELECT max(c1) FROM t1 INTO @var;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
LOCK TABLE tm1 WRITE, t1 WRITE;
INSERT INTO tm1 VALUES (1);
SELECT * FROM tm1;
@@ -2830,8 +2828,6 @@ CREATE TABLE tm1 (c1 INT) ENGINE=MRG_MYISAM UNION=(t1,t2,t3,t4,t5)
INSERT_METHOD=LAST;
CREATE TRIGGER t2_au AFTER UPDATE ON t2
FOR EACH ROW SELECT MAX(c1) FROM t1 INTO @var;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE FUNCTION f1() RETURNS INT
RETURN (SELECT MAX(c1) FROM t4);
LOCK TABLE tm1 WRITE, t1 WRITE, t2 WRITE, t3 WRITE, t4 WRITE, t5 WRITE;
@@ -3929,3 +3925,45 @@ drop table tm, t;
#
# End of 10.8 tests
#
+#
+# MDEV-30088 Assertion `cond_selectivity <= 1.0' failed in get_range_limit_read_cost
+#
+CREATE TABLE t1 (a TIMESTAMP, KEY(a)) ENGINE=MRG_MyISAM;
+explain SELECT a, COUNT(*) FROM t1 WHERE a >= '2000-01-01 00:00:00' GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+SELECT a, COUNT(*) FROM t1 WHERE a >= '2000-01-01 00:00:00' GROUP BY a;
+a COUNT(*)
+DROP TABLE t1;
+#
+# MDEV-30525: Assertion `ranges > 0' fails in IO_AND_CPU_COST handler::keyread_time
+#
+CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE t2 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE tm (a INT, KEY(a)) ENGINE=MRG_MyISAM UNION=(t1,t2);
+SELECT DISTINCT a FROM tm WHERE a > 50;
+a
+DROP TABLE tm, t1, t2;
+# Testcase 2:
+CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE t2 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE tm (a INT, KEY(a)) ENGINE=MERGE UNION = (t1, t2) INSERT_METHOD=FIRST;
+ANALYZE TABLE tm PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.tm analyze status Engine-independent statistics collected
+test.tm analyze note The storage engine for the table doesn't support analyze
+SELECT DISTINCT a FROM (SELECT * FROM tm WHERE a iS NOT NULL) AS sq;
+a
+DROP TABLE tm, t1, t2;
+#
+# MDEV-30568 Assertion `cond_selectivity <= 1.000000001' failed in get_range_limit_read_cost
+#
+CREATE TABLE t1 (f INT, KEY(f)) ENGINE=MyISAM;
+CREATE TABLE t2 (f INT, KEY(f)) ENGINE=MyISAM;
+CREATE TABLE tm (f INT, KEY(f)) ENGINE=MERGE UNION = (t1, t2);
+SELECT DISTINCT f FROM tm WHERE f IN (47, 126, 97, 48, 73, 0);
+f
+DROP TABLE tm, t1, t2;
+#
+# End of 11.0 tests
+#
diff --git a/mysql-test/main/merge.test b/mysql-test/main/merge.test
index 0485f3ed1c3..6b88d427fb4 100644
--- a/mysql-test/main/merge.test
+++ b/mysql-test/main/merge.test
@@ -2886,3 +2886,42 @@ drop table tm, t;
--echo #
--echo # End of 10.8 tests
--echo #
+
+--echo #
+--echo # MDEV-30088 Assertion `cond_selectivity <= 1.0' failed in get_range_limit_read_cost
+--echo #
+
+CREATE TABLE t1 (a TIMESTAMP, KEY(a)) ENGINE=MRG_MyISAM;
+explain SELECT a, COUNT(*) FROM t1 WHERE a >= '2000-01-01 00:00:00' GROUP BY a;
+SELECT a, COUNT(*) FROM t1 WHERE a >= '2000-01-01 00:00:00' GROUP BY a;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-30525: Assertion `ranges > 0' fails in IO_AND_CPU_COST handler::keyread_time
+--echo #
+CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE t2 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE tm (a INT, KEY(a)) ENGINE=MRG_MyISAM UNION=(t1,t2);
+SELECT DISTINCT a FROM tm WHERE a > 50;
+DROP TABLE tm, t1, t2;
+
+--echo # Testcase 2:
+CREATE TABLE t1 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE t2 (a INT, KEY(a)) ENGINE=MyISAM;
+CREATE TABLE tm (a INT, KEY(a)) ENGINE=MERGE UNION = (t1, t2) INSERT_METHOD=FIRST;
+ANALYZE TABLE tm PERSISTENT FOR ALL;
+SELECT DISTINCT a FROM (SELECT * FROM tm WHERE a iS NOT NULL) AS sq;
+DROP TABLE tm, t1, t2;
+
+--echo #
+--echo # MDEV-30568 Assertion `cond_selectivity <= 1.000000001' failed in get_range_limit_read_cost
+--echo #
+CREATE TABLE t1 (f INT, KEY(f)) ENGINE=MyISAM;
+CREATE TABLE t2 (f INT, KEY(f)) ENGINE=MyISAM;
+CREATE TABLE tm (f INT, KEY(f)) ENGINE=MERGE UNION = (t1, t2);
+SELECT DISTINCT f FROM tm WHERE f IN (47, 126, 97, 48, 73, 0);
+DROP TABLE tm, t1, t2;
+
+--echo #
+--echo # End of 11.0 tests
+--echo #
diff --git a/mysql-test/main/metadata.result b/mysql-test/main/metadata.result
index 5786e2fd461..ab28cab52cb 100644
--- a/mysql-test/main/metadata.result
+++ b/mysql-test/main/metadata.result
@@ -147,7 +147,7 @@ id data data
2 female no
select t1.id from t1 union select t2.id from t2;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def id id 246 4 1 Y 32768 0 63
+def id id 246 4 1 Y 49152 0 63
id
1
2
@@ -158,7 +158,7 @@ insert into t1 values (2,'two');
set @arg00=1 ;
select @arg00 FROM t1 where a=1 union distinct select 1 FROM t1 where a=1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def @arg00 @arg00 8 20 1 Y 32768 0 63
+def @arg00 @arg00 8 20 1 Y 49152 0 63
@arg00
1
select * from (select @arg00) aaa;
@@ -168,12 +168,12 @@ def aaa @arg00 @arg00 8 20 1 Y 32768 0 63
1
select 1 union select 1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def 1 1 3 1 1 N 32769 0 63
+def 1 1 3 1 1 N 49153 0 63
1
1
select * from (select 1 union select 1) aaa;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def aaa 1 1 3 1 1 N 32769 0 63
+def aaa 1 1 3 1 1 N 49153 0 63
1
1
drop table t1;
diff --git a/mysql-test/main/mrr_derived_crash_4610.result b/mysql-test/main/mrr_derived_crash_4610.result
index 3e38a0d4218..d7800e8a2a9 100644
--- a/mysql-test/main/mrr_derived_crash_4610.result
+++ b/mysql-test/main/mrr_derived_crash_4610.result
@@ -7,7 +7,7 @@ explain select 1 from
(select f2, f3, val, count(id) from t4 join t2 left join t3 on 0) top
join t1 on f1 = f3 where f3 = 'aaaa' order by val;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 const PRIMARY PRIMARY 12 const 1 Using index
+1 PRIMARY t1 const PRIMARY PRIMARY 12 const 1
1 PRIMARY <derived2> ref key0 key0 13 const 0 Using where; Using filesort
2 DERIVED t4 ALL NULL NULL NULL NULL 1
2 DERIVED t2 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result
index 2112c6d5480..bf011f66be0 100644
--- a/mysql-test/main/multi_update.result
+++ b/mysql-test/main/multi_update.result
@@ -1304,27 +1304,24 @@ t1.c1 > 1 and
exists (select 'X' from t2 where t2.c1 = t1.c1 and t2.c2 > 4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL idx NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY t2 range idx idx 5 NULL 3 Using index condition; Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
1 PRIMARY t3 ref idx idx 5 test.t1.c2 3
-2 MATERIALIZED t2 range idx idx 5 NULL 3 Using index condition; Using where
explain delete from t1 using t1,t3
where t1.c2 = t3.c2 and
t1.c1 > 1 and
exists (select 'X' from t2 where t2.c1 = t1.c1 and t2.c2 > 4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL idx NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY t2 range idx idx 5 NULL 3 Using where; FirstMatch(t1)
1 PRIMARY t3 ref idx idx 5 test.t1.c2 3 Using index
-2 MATERIALIZED t2 range idx idx 5 NULL 3 Using where
explain update t1,t3 set t1.c1 = t1.c1+10
where t1.c2 = t3.c2 and
t1.c1 > 1 and
exists (select 'X' from t2 where t2.c1 = t1.c1 and t2.c2 > 4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL idx NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY t2 range idx idx 5 NULL 3 Using where; FirstMatch(t1)
1 PRIMARY t3 ref idx idx 5 test.t1.c2 3 Using index
-2 MATERIALIZED t2 range idx idx 5 NULL 3 Using where
create table t as select * from t1;
select * from t1,t3
where t1.c2 = t3.c2 and
diff --git a/mysql-test/main/multi_update.test b/mysql-test/main/multi_update.test
index 48e6250393b..b9ceb458db6 100644
--- a/mysql-test/main/multi_update.test
+++ b/mysql-test/main/multi_update.test
@@ -1121,12 +1121,15 @@ INSERT INTO t1 (part,a,b) VALUES (0,0,0),(1,1,1),(2,2,2);
INSERT INTO t2 (part,a,b) VALUES (0,0,0),(1,1,1),(2,2,2);
--echo # Expecting partition "Current"
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON UPDATE t2 JOIN t1 USING(a) SET t2.part=3 WHERE t2.part=0 AND t1.part=0;
--echo # Expecting partition "Relevant"
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON UPDATE t2 JOIN t1 USING(a) SET t2.part=2 WHERE t2.part=1 AND t1.part=1;
--echo # Expecting partition "Archive"
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON UPDATE t2 JOIN t1 USING(a) SET t2.part=3 WHERE t2.part=2 AND t1.part=2;
DROP TABLES t1, t2;
diff --git a/mysql-test/main/myisam.result b/mysql-test/main/myisam.result
index 0b0099d7b84..477434e2a5c 100644
--- a/mysql-test/main/myisam.result
+++ b/mysql-test/main/myisam.result
@@ -348,11 +348,11 @@ t1 1 c_2 2 a A 5 NULL NULL BTREE NO
explain select * from t1,t2 where t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL a NULL NULL NULL 2
-1 SIMPLE t1 ALL a NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ref a a 4 test.t2.a 3
explain select * from t1,t2 force index(a) where t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL a NULL NULL NULL 2
-1 SIMPLE t1 ALL a NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ref a a 4 test.t2.a 3
explain select * from t1 force index(a),t2 force index(a) where t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL a NULL NULL NULL 2
@@ -388,10 +388,10 @@ t1 1 c_2 2 a A 5 NULL NULL BTREE NO
explain select * from t1,t2 force index(c) where t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2
-1 SIMPLE t1 ALL a NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ref a a 4 test.t2.a 3
explain select * from t1 where a=0 or a=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL a NULL NULL NULL 5 Using where
+1 SIMPLE t1 range a a 4 NULL 5 Using index condition
explain select * from t1 force index (a) where a=0 or a=2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 4 NULL 5 Using index condition
@@ -644,6 +644,13 @@ id select_type table type possible_keys key key_len ref rows Extra
select count(*) from t1 where a is null;
count(*)
2
+insert into t1 values (1,''), (2,'');
+explain select count(*) from t1 where a is null;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref idx idx 4 const 2 Using where
+set statement optimizer_scan_setup_cost= 0 for explain select count(*) from t1 where a is null;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL idx NULL NULL NULL 4 Using where
drop table t1;
create table t1 (c1 int, c2 varchar(4) not null default '',
key(c2(3))) default charset=utf8;
@@ -2532,6 +2539,7 @@ DROP TABLE t1, t2, t3;
#
# BUG#51307 - widespread corruption with partitions and insert...select
#
+call mtr.add_suppression("Enabling keys got errno 12 on test.t1, retrying");
CREATE TABLE t1(a CHAR(255), KEY(a));
SELECT * FROM t1, t1 AS a1;
a a
@@ -2810,3 +2818,44 @@ drop table t;
#
# End of 10.8 tests
#
+CREATE TABLE t1 (pk INT, a INT, PRIMARY KEY(pk), KEY(a)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,NULL),(2,NULL),(3,1),(4,5);
+SET @save_optimizer_switch= @@optimizer_switch;
+SET optimizer_switch='index_merge_sort_intersection=on';
+SELECT pk FROM t1 WHERE pk > 2 AND a IS NULL;
+pk
+SET @@optimizer_switch= @save_optimizer_switch;
+drop table t1;
+#
+# MDEV-30104 Server crashes in handler_rowid_filter_check upon ANALYZE TABLE
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2);
+CREATE TABLE t3 (c INT) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (1),(2);
+CREATE TABLE t4 (pk INT, f CHAR(8), PRIMARY KEY(pk), KEY(f)) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (1,'o'),(2,'x');
+ANALYZE TABLE t1, t2, t3, t4 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
+test.t4 analyze status OK
+SELECT * FROM t1 LEFT JOIN (t2 JOIN t3 ON 1) ON 2 IN (SELECT pk FROM t4 WHERE f < 's');
+a b c
+1 NULL NULL
+2 NULL NULL
+ANALYZE TABLE t4 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t4 analyze status Engine-independent statistics collected
+test.t4 analyze status Table is already up to date
+DROP TABLE t1, t2, t3, t4;
+#
+# End of 11.0 tests
+#
diff --git a/mysql-test/main/myisam.test b/mysql-test/main/myisam.test
index ec49e71bc2d..1a20f97a54f 100644
--- a/mysql-test/main/myisam.test
+++ b/mysql-test/main/myisam.test
@@ -593,6 +593,9 @@ create table t1 ( a tinytext, b char(1), index idx (a(1),b) );
insert into t1 values (null,''), (null,'');
explain select count(*) from t1 where a is null;
select count(*) from t1 where a is null;
+insert into t1 values (1,''), (2,'');
+explain select count(*) from t1 where a is null;
+set statement optimizer_scan_setup_cost= 0 for explain select count(*) from t1 where a is null;
drop table t1;
#
@@ -1676,6 +1679,9 @@ DROP TABLE t1, t2, t3;
--echo #
--echo # BUG#51307 - widespread corruption with partitions and insert...select
--echo #
+
+call mtr.add_suppression("Enabling keys got errno 12 on test.t1, retrying");
+
CREATE TABLE t1(a CHAR(255), KEY(a));
SELECT * FROM t1, t1 AS a1;
SET myisam_sort_buffer_size=4;
@@ -1910,3 +1916,38 @@ drop table t;
--echo #
--echo # End of 10.8 tests
--echo #
+
+CREATE TABLE t1 (pk INT, a INT, PRIMARY KEY(pk), KEY(a)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,NULL),(2,NULL),(3,1),(4,5);
+SET @save_optimizer_switch= @@optimizer_switch;
+SET optimizer_switch='index_merge_sort_intersection=on';
+SELECT pk FROM t1 WHERE pk > 2 AND a IS NULL;
+SET @@optimizer_switch= @save_optimizer_switch;
+drop table t1;
+
+--echo #
+--echo # MDEV-30104 Server crashes in handler_rowid_filter_check upon ANALYZE TABLE
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2); # Optional, fails either way
+
+CREATE TABLE t3 (c INT) ENGINE=MyISAM;
+INSERT INTO t3 VALUES (1),(2); # Optional, fails either way
+
+CREATE TABLE t4 (pk INT, f CHAR(8), PRIMARY KEY(pk), KEY(f)) ENGINE=MyISAM;
+INSERT INTO t4 VALUES (1,'o'),(2,'x');
+
+ANALYZE TABLE t1, t2, t3, t4 PERSISTENT FOR ALL; # Optional, fails either way
+SELECT * FROM t1 LEFT JOIN (t2 JOIN t3 ON 1) ON 2 IN (SELECT pk FROM t4 WHERE f < 's');
+
+ANALYZE TABLE t4 PERSISTENT FOR ALL;
+
+DROP TABLE t1, t2, t3, t4;
+
+--echo #
+--echo # End of 11.0 tests
+--echo #
diff --git a/mysql-test/main/myisam_debug.result b/mysql-test/main/myisam_debug.result
index 10208a936a0..698a45294c0 100644
--- a/mysql-test/main/myisam_debug.result
+++ b/mysql-test/main/myisam_debug.result
@@ -23,8 +23,6 @@ SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE STATE = 'wait_in_enable_indexes' AND
INFO = "INSERT INTO t1(id) SELECT id FROM t2"
INTO @thread_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
KILL QUERY @thread_id;
CHECK TABLE t1;
Table Op Msg_type Msg_text
diff --git a/mysql-test/main/myisam_explain_non_select_all.result b/mysql-test/main/myisam_explain_non_select_all.result
index 2ff966fdfd3..c8c26d76a70 100644
--- a/mysql-test/main/myisam_explain_non_select_all.result
+++ b/mysql-test/main/myisam_explain_non_select_all.result
@@ -7,8 +7,6 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a < 10
# select: SELECT * FROM t1 WHERE a < 10
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a < 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -30,8 +28,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` <
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -50,8 +46,6 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 WHERE a < 10
# select: SELECT * FROM t1 WHERE a < 10
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a < 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -73,8 +67,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` <
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -93,8 +85,6 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 USING t1 WHERE a = 1
# select: SELECT * FROM t1 WHERE a = 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 USING t1 WHERE a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -116,8 +106,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` =
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -138,8 +126,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1
# select: SELECT * FROM t1, t2 WHERE t1.a = 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -164,8 +150,6 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -186,8 +170,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a = 1
# select: SELECT * FROM t1 t11, (SELECT * FROM t2) t12 WHERE t11.a = 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 Using where
@@ -214,8 +196,6 @@ Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -236,8 +216,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3)
# select: SELECT * FROM t1 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
@@ -256,20 +234,17 @@ FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 33.33 Using where; FirstMatch
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using join buffer (flat, BNL join)
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 5
-Handler_read_rnd_next 8
+Handler_read_key 4
+Handler_read_rnd_next 5
# Status of testing query execution:
Variable_name Value
Handler_read_key 4
@@ -286,8 +261,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3)
# select: SELECT * FROM t1 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
@@ -308,15 +281,13 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 33.33 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`a` and `test`.`t1`.`a` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -328,60 +299,59 @@ Handler_read_rnd_next 7
Handler_update 2
DROP TABLE t1, t2;
-#7
+#7a
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3);
CREATE TABLE t2 (b INT);
-INSERT INTO t2 VALUES (1), (2), (3);
+INSERT INTO t2 VALUES (1), (2), (3), (1000);
+CREATE TABLE t3 like t2;
+insert into t3 select * from t2;
+insert into t3 select seq from seq_1001_to_2000;
#
-# query: UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3)
-# select: SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3)
+# query: UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3)
+# select: SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
-EXPLAIN UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3);
+EXPLAIN UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY t2 ALL NULL NULL NULL NULL 3
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 1004 Using where
FLUSH STATUS;
FLUSH TABLES;
-EXPLAIN EXTENDED UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3);
+EXPLAIN EXTENDED UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 100.00
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 1004 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
-Handler_read_key 4
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
-EXPLAIN EXTENDED SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3);
+EXPLAIN EXTENDED SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t3 WHERE t3.b < 3);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using join buffer (flat, BNL join)
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 100.00 Using join buffer (flat, BNL join)
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 1004 100.00 Using where
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join (`test`.`t2`) join `test`.`t2` where `test`.`t2`.`b` < 3
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join (`test`.`t3`) join `test`.`t2` where `test`.`t3`.`b` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
-Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+Handler_read_key 6
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 7
-Handler_read_rnd_next 12
+Handler_read_key 9
+Handler_read_rnd_next 1014
# Status of testing query execution:
Variable_name Value
-Handler_read_key 7
-Handler_read_rnd_next 16
+Handler_read_key 9
+Handler_read_rnd_next 1019
Handler_update 2
-DROP TABLE t1, t2;
+DROP TABLE t1, t2, t3;
#8
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3);
@@ -391,8 +361,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = t11.a + 10
# select: SELECT * FROM t1 t11, (SELECT * FROM t2) t12
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = t11.a + 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3
@@ -419,8 +387,6 @@ Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -443,8 +409,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT 1 FROM DUAL) t12 SET t11.a = t11.a + 10
# select: SELECT * FROM t1 t11, (SELECT 1 FROM DUAL) t12
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT 1 FROM DUAL) t12 SET t11.a = t11.a + 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> system NULL NULL NULL NULL 1
@@ -474,8 +438,6 @@ Note 1003 /* select#1 */ select `test`.`t11`.`a` AS `a`,1 AS `1` from `test`.`t1
Variable_name Value
Handler_read_key 2
Handler_read_rnd_next 1
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -497,8 +459,6 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a > 1
# select: SELECT * FROM t1 t11, (SELECT * FROM t2) t12 WHERE t11.a > 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a > 1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 Using where
@@ -525,8 +485,6 @@ Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -545,8 +503,6 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 WHERE a > 1 LIMIT 1
# select: SELECT * FROM t1 WHERE a > 1 LIMIT 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a > 1 LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -568,8 +524,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` >
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -588,8 +542,6 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 WHERE 0
# select: SELECT * FROM t1 WHERE 0
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
@@ -611,8 +563,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 0
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -628,8 +578,6 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 USING t1 WHERE 0
# select: SELECT * FROM t1 WHERE 0
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 USING t1 WHERE 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
@@ -651,8 +599,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 0
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -668,8 +614,6 @@ INSERT INTO t1 VALUES (3, 3), (7, 7);
# query: DELETE FROM t1 WHERE a = 3
# select: SELECT * FROM t1 WHERE a = 3
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a = 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 1 Using where
@@ -691,8 +635,6 @@ Note 1003 select 3 AS `a`,3 AS `b` from `test`.`t1` where 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 6
@@ -709,8 +651,6 @@ INSERT INTO t1 VALUES (3, 3), (7, 7);
# query: DELETE FROM t1 WHERE a < 3
# select: SELECT * FROM t1 WHERE a < 3
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 1 Using where
@@ -732,8 +672,6 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 5
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 6
@@ -748,8 +686,6 @@ CREATE TABLE t1 ( a int PRIMARY KEY );
# query: DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a
# select: SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
@@ -772,8 +708,6 @@ Note 1003 select NULL AS `a` from `test`.`t1` where 0 order by NULL
Variable_name Value
Handler_read_key 3
Handler_read_rnd_next 1
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 3
@@ -787,8 +721,6 @@ INSERT INTO t1 VALUES (1), (2), (3), (-1), (-2), (-3);
# query: DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a
# select: SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where
@@ -810,8 +742,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` >
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -830,8 +760,6 @@ INSERT INTO t1 VALUES (4),(3),(1),(2);
# query: DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1
# select: SELECT * FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 Using where
@@ -853,8 +781,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where @a:=`test`.`t1`.`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
@@ -874,8 +800,6 @@ UPDATE t1 SET a = c, b = c;
# query: DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1
# select: SELECT * FROM t1 ORDER BY a ASC, b ASC LIMIT 1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using filesort
@@ -897,8 +821,6 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 7
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -927,8 +849,6 @@ INSERT INTO t3 VALUES (1,1), (2,1), (1,3);
# query: DELETE t1,t2,t3 FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3
# select: SELECT * FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE t1,t2,t3 FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -956,8 +876,6 @@ Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 13
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 19
@@ -976,23 +894,21 @@ DROP TABLE t1, t2, t3;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2), (3);
CREATE TABLE t2 (a INT);
-INSERT INTO t2 VALUES (1), (2), (3);
+INSERT INTO t2 VALUES (1), (2), (3), (1000);
#
# query: UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2)
# select: SELECT * FROM t1 WHERE a IN (SELECT a FROM t2)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 Using where
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 4 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 4
@@ -1001,19 +917,16 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 25.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where 1
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 7
-Handler_read_rnd_next 8
+Handler_read_key 4
+Handler_read_rnd_next 9
# Status of testing query execution:
Variable_name Value
Handler_read_key 4
@@ -1031,8 +944,6 @@ SET @save_optimizer_switch= @@optimizer_switch;
# query: DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
# select: SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
@@ -1057,8 +968,6 @@ Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1` from `test`.`t1` where
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 9
@@ -1076,8 +985,6 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
# query: DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
# select: SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
@@ -1096,14 +1003,12 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a2` > 2 and `test`.`t1`.`a1` = `test`.`t2`.`a2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -1122,8 +1027,6 @@ INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
# query: UPDATE t1 SET i = 10
# select: SELECT * FROM t1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET i = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
@@ -1145,8 +1048,6 @@ Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`j` AS `j` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 3
@@ -1165,8 +1066,6 @@ INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
# query: DELETE FROM t1
# select: SELECT * FROM t1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL 5 Deleting all rows
@@ -1188,8 +1087,6 @@ Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`j` AS `j` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 3
@@ -1211,8 +1108,6 @@ INSERT INTO t2 (a, b, c) SELECT t1.i, t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 15 NULL 5 Using where
@@ -1234,8 +1129,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
@@ -1257,8 +1150,6 @@ CREATE TABLE t2 (i INT);
# query: INSERT INTO t2 SELECT * FROM t1
# select: SELECT * FROM t1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN INSERT INTO t2 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -1280,8 +1171,6 @@ Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -1301,8 +1190,6 @@ CREATE TABLE t2 (i INT);
# query: REPLACE INTO t2 SELECT * FROM t1
# select: SELECT * FROM t1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN REPLACE INTO t2 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -1324,8 +1211,6 @@ Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -1392,8 +1277,6 @@ INSERT INTO t1 (i) VALUES (10),(11),(12),(13),(14),(15),(16),(17),(18),(19),
# query: DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 5 Using where
@@ -1415,8 +1298,6 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 5
@@ -1437,8 +1318,6 @@ INSERT INTO t1 (i) VALUES (10),(11),(12),(13),(14),(15),(16),(17),(18),(19),
# query: DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1460,8 +1339,6 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -1490,16 +1367,14 @@ INSERT INTO t2 (a, b, c) SELECT i, i, i FROM t1;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
+1 SIMPLE t2 index NULL a 15 NULL 5 Using where
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -1507,29 +1382,23 @@ FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_first 1
Handler_read_key 8
-Handler_read_rnd_next 27
-Sort_priority_queue_sorts 1
-Sort_rows 1
-Sort_scan 1
+Handler_read_next 26
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_first 1
Handler_read_key 8
-Handler_read_rnd 1
-Handler_read_rnd_next 27
-Sort_rows 1
-Sort_scan 1
+Handler_read_next 26
DROP TABLE t1, t2;
#32
@@ -1544,8 +1413,6 @@ INSERT INTO t2 (a, b, c) SELECT t1.i, t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 15 NULL 5 Using where
@@ -1567,8 +1434,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
@@ -1593,8 +1458,6 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1616,8 +1479,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 8
@@ -1647,8 +1508,6 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1670,8 +1529,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 8
@@ -1702,8 +1559,6 @@ INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
# query: DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1
# select: SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 Using sort_union(key1,key2); Using where; Using filesort
@@ -1725,8 +1580,6 @@ Note 1003 select `test`.`t2`.`i` AS `i`,`test`.`t2`.`key1` AS `key1`,`test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 8
@@ -1755,8 +1608,6 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 Using where
@@ -1778,8 +1629,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 5
@@ -1802,8 +1651,6 @@ INSERT INTO t2 SELECT i, i, i FROM t1;
# query: DELETE FROM t2 ORDER BY a, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a, b DESC LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 ORDER BY a, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using filesort
@@ -1825,8 +1672,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 6
@@ -1856,8 +1701,6 @@ INSERT INTO t2 (a, b) SELECT t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a DESC, b DESC LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 6 NULL 5
@@ -1879,8 +1722,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 6
@@ -1905,8 +1746,6 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 Using where; Using buffer
@@ -1928,8 +1767,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 5
@@ -1953,8 +1790,6 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1976,8 +1811,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -2007,16 +1840,14 @@ INSERT INTO t2 (a, b, c) SELECT i, i, i FROM t1;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
+1 SIMPLE t2 index NULL a 15 NULL 5 Using where; Using buffer
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where; Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
Handler_read_key 8
@@ -2024,30 +1855,24 @@ FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
+1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_first 1
Handler_read_key 8
-Handler_read_rnd_next 27
-Sort_priority_queue_sorts 1
-Sort_rows 1
-Sort_scan 1
+Handler_read_next 26
# Status of testing query execution:
Variable_name Value
+Handler_read_first 1
Handler_read_key 8
+Handler_read_next 26
Handler_read_rnd 1
-Handler_read_rnd_next 27
Handler_update 1
-Sort_priority_queue_sorts 1
-Sort_rows 1
-Sort_scan 1
DROP TABLE t1, t2;
#42
@@ -2062,8 +1887,6 @@ INSERT INTO t2 (a, b, c) SELECT t1.i, t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 15 NULL 5 Using where; Using buffer
@@ -2085,8 +1908,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
@@ -2112,8 +1933,6 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -2135,8 +1954,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 8
@@ -2166,8 +1983,6 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -2189,8 +2004,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 8
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 8
@@ -2221,8 +2034,6 @@ INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
# query: UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1
# select: SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 Using sort_union(key1,key2); Using where; Using filesort
@@ -2244,8 +2055,6 @@ Note 1003 select `test`.`t2`.`i` AS `i`,`test`.`t2`.`key1` AS `key1`,`test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 8
@@ -2274,8 +2083,6 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 Using where; Using buffer
@@ -2297,8 +2104,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 5
@@ -2322,8 +2127,6 @@ INSERT INTO t2 SELECT i, i, i FROM t1;
# query: UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a, b DESC LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using filesort
@@ -2345,8 +2148,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 6
@@ -2377,8 +2178,6 @@ INSERT INTO t2 (a, b) SELECT t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a DESC, b DESC LIMIT 5
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 6 NULL 5 Using buffer
@@ -2400,8 +2199,6 @@ Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` A
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 6
@@ -2429,8 +2226,6 @@ INSERT INTO t1 VALUES (1,'y',1), (2,'n',2), (3,'y',3), (4,'n',4);
# query: UPDATE t1 SET c2 = 0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
# select: SELECT * FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET c2 = 0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 Using where; Using filesort
@@ -2452,8 +2247,6 @@ Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1_idx` AS `c1_idx`,`test
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -2475,8 +2268,6 @@ Sort_rows 2
# query: DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
# select: SELECT * FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 Using where; Using filesort
@@ -2498,8 +2289,6 @@ Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1_idx` AS `c1_idx`,`test
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -2524,8 +2313,6 @@ INSERT INTO t1 VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(
# query: UPDATE t1 SET a=a+10 WHERE a > 34
# select: SELECT * FROM t1 WHERE a > 34
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a=a+10 WHERE a > 34;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using buffer
@@ -2547,8 +2334,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` >
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -2569,8 +2354,6 @@ INSERT INTO t1 VALUES (1, 1, 10), (2, 2, 20);
# query: UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10
# select: SELECT * FROM t1 LEFT JOIN t2 ON t1.c1 = t2.c1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 system NULL NULL NULL NULL 0 Const row not found
@@ -2597,8 +2380,6 @@ Note 1003 select `test`.`t1`.`c1` AS `c1`,`test`.`t1`.`c2` AS `c2`,`test`.`t1`.`
Variable_name Value
Handler_read_key 7
Handler_read_rnd_next 1
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -2612,8 +2393,6 @@ Handler_read_rnd_next 4
# query: UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 WHERE t1.c3 = 10
# select: SELECT * FROM t1 LEFT JOIN t2 ON t1.c1 = t2.c1 WHERE t1.c3 = 10
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 WHERE t1.c3 = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 system NULL NULL NULL NULL 0 Const row not found
@@ -2640,8 +2419,6 @@ Note 1003 select `test`.`t1`.`c1` AS `c1`,`test`.`t1`.`c2` AS `c2`,`test`.`t1`.`
Variable_name Value
Handler_read_key 7
Handler_read_rnd_next 1
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -2662,18 +2439,16 @@ INSERT INTO t2 VALUES(1,1),(2,2);
# query: UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1)
# select: SELECT (SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1) FROM t1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
-2 DEPENDENT SUBQUERY t2 ALL IDX NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY t2 ref IDX IDX 5 test.t1.f1 1
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-2 DEPENDENT SUBQUERY t2 ALL IDX NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ref IDX IDX 5 test.t1.f1 1 100.00
Warnings:
Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
# Status of EXPLAIN EXTENDED query
@@ -2684,23 +2459,23 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT (SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-2 DEPENDENT SUBQUERY t2 ALL IDX NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ref IDX IDX 5 test.t1.f1 1 100.00
Warnings:
Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select <expr_cache><`test`.`t1`.`f1`>((/* select#2 */ select max(`test`.`t2`.`f4`) from `test`.`t2` where `test`.`t2`.`f3` = `test`.`t1`.`f1`)) AS `(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1)` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 7
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 9
-Handler_read_rnd_next 9
+Handler_read_key 11
+Handler_read_next 2
+Handler_read_rnd_next 3
# Status of testing query execution:
Variable_name Value
-Handler_read_key 7
-Handler_read_rnd_next 9
+Handler_read_key 9
+Handler_read_next 2
+Handler_read_rnd_next 3
Handler_update 2
DROP TABLE t1, t2;
@@ -2735,8 +2510,6 @@ CREATE VIEW v1 AS SELECT t11.a, t12.a AS b FROM t1 t11, t1 t12;
# query: UPDATE v1 SET a = 1 WHERE a > 0
# select: SELECT * FROM v1 WHERE a > 0
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE v1 SET a = 1 WHERE a > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t11 ALL NULL NULL NULL NULL 2 Using where
@@ -2761,8 +2534,6 @@ Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t12`.`a` AS `b` from `test`.`t1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -2778,8 +2549,6 @@ Handler_read_rnd_next 8
# query: UPDATE t1, v1 SET v1.a = 1 WHERE t1.a = v1.a
# select: SELECT * FROM t1, v1 WHERE t1.a = v1.a
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, v1 SET v1.a = 1 WHERE t1.a = v1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
@@ -2807,8 +2576,6 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t11`.`a` AS `a`,`test`.`t12`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 2
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -2831,8 +2598,6 @@ CREATE VIEW v1 (a) AS SELECT a FROM t1;
# query: DELETE FROM v1 WHERE a < 4
# select: SELECT * FROM v1 WHERE a < 4
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM v1 WHERE a < 4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where
@@ -2854,8 +2619,6 @@ Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` <
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
@@ -2880,8 +2643,6 @@ CREATE VIEW v1 (a,c) AS SELECT a, b+1 FROM t1;
# query: DELETE v1 FROM t2, v1 WHERE t2.x = v1.a
# select: SELECT * FROM t2, v1 WHERE t2.x = v1.a
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE v1 FROM t2, v1 WHERE t2.x = v1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where
@@ -2906,8 +2667,6 @@ Note 1003 select `test`.`t2`.`x` AS `x`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` +
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 10
@@ -2931,8 +2690,6 @@ CREATE VIEW v1 (a,c) AS SELECT a, b+1 FROM t1;
# query: DELETE v1 FROM t2, v1 WHERE t2.x = v1.a
# select: SELECT * FROM t2, v1 WHERE t2.x = v1.a
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE v1 FROM t2, v1 WHERE t2.x = v1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where
@@ -2957,8 +2714,6 @@ Note 1003 select `test`.`t2`.`x` AS `x`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` +
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 6
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 10
@@ -3017,8 +2772,6 @@ CREATE VIEW v1 (x) AS SELECT b FROM t2;
# query: INSERT INTO v1 SELECT * FROM t1
# select: SELECT * FROM t1
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN INSERT INTO v1 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system NULL NULL NULL NULL 0 Const row not found
@@ -3042,8 +2795,6 @@ Note 1003 select NULL AS `a` from `test`.`t1`
Variable_name Value
Handler_read_key 2
Handler_read_rnd_next 1
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 2
@@ -3070,19 +2821,17 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
# select: SELECT * FROM t1 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 5 func 2
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 5 func 1
3 DERIVED t2 ALL NULL NULL NULL NULL 3 Using filesort
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 5 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 5 func 1 100.00
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
@@ -3092,15 +2841,13 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 5 test.t1.a 1 100.00
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) where `x`.`b` = `test`.`t1`.`a`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from (/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x` join `test`.`t1` where `x`.`b` = `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -3121,12 +2868,10 @@ Sort_scan 1
# query: UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
# select: SELECT * FROM t1, t2 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 5 test.t1.a 1
1 PRIMARY t2 ALL NULL NULL NULL NULL 3
3 DERIVED t2 ALL NULL NULL NULL NULL 3 Using filesort
FLUSH STATUS;
@@ -3134,7 +2879,7 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 5 test.t1.a 1 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
@@ -3145,16 +2890,14 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, t2 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY <derived3> ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 5 test.t1.a 1 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using join buffer (flat, BNL join)
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` where `x`.`b` = `test`.`t1`.`a`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from (/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x` join `test`.`t1` join `test`.`t2` where `x`.`b` = `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -3174,12 +2917,10 @@ Sort_scan 1
# query: UPDATE t1, (SELECT * FROM t2) y SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
# select: SELECT * FROM t1, (SELECT * FROM t2) y WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, (SELECT * FROM t2) y SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY <derived4> ref key0 key0 5 test.t1.a 2 FirstMatch(t1)
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 5 test.t1.a 1
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
4 DERIVED t2 ALL NULL NULL NULL NULL 3 Using filesort
2 DERIVED t2 ALL NULL NULL NULL NULL 3
@@ -3188,7 +2929,7 @@ FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1, (SELECT * FROM t2) y SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY <derived4> ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1)
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 5 test.t1.a 1 100.00
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00
4 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
@@ -3200,16 +2941,14 @@ FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, (SELECT * FROM t2) y WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY <derived4> ref key0 key0 5 test.t1.a 2 100.00 FirstMatch(t1)
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 5 test.t1.a 1 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using join buffer (flat, BNL join)
4 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join ((/* select#4 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` where `x`.`b` = `test`.`t1`.`a`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from (/* select#4 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x` join `test`.`t1` join `test`.`t2` where `x`.`b` = `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 4
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 7
@@ -3239,8 +2978,7 @@ JOIN t1 AS a12 ON a12.c1 = a11.c1
) d1
);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
-2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DROP TABLE t1, t2, t3;
#73
CREATE TABLE t1 (id INT);
@@ -3259,8 +2997,6 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
# query: UPDATE t1 SET a=a+1 WHERE a>10
# select: SELECT a t1 FROM t1 WHERE a>10
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a=a+1 WHERE a>10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using buffer
@@ -3282,8 +3018,6 @@ Note 1003 select `test`.`t1`.`a` AS `t1` from `test`.`t1` where `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -3296,8 +3030,6 @@ Handler_read_key 4
# query: UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20
# select: SELECT a t1 FROM t1 WHERE a>10 ORDER BY a+20
#
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using filesort
@@ -3319,8 +3051,6 @@ Note 1003 select `test`.`t1`.`a` AS `t1` from `test`.`t1` where `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
Handler_read_key 3
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_key 4
@@ -3375,7 +3105,7 @@ CALL p10();
DROP PROCEDURE p10;
CALL p9();
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 0
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DROP PROCEDURE p9;
CALL p8();
DROP PROCEDURE p8;
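The result hunks above record two kinds of changes: the deprecated "'<select expression> INTO <destination>'" Warning 1287 lines disappear from the expected output, and several recorded plans improve. The correlated subquery in the UPDATE is now resolved with ref access on the IDX index (ref=test.t1.f1, 1 row) instead of scanning t2, the IN (SELECT ... LIMIT 2,2) predicate is answered through the derived table's distinct_key (unique_subquery/eq_ref) rather than a FirstMatch semi-join, and a couple of plans now collapse to "Impossible WHERE" up front. A minimal sketch of the first plan change, assuming table definitions along the lines of the original test (the CREATE TABLE statements sit outside these hunks, so the column types here are guesses):

CREATE TABLE t1 (f1 INT, f2 INT);
CREATE TABLE t2 (f3 INT, f4 INT, KEY IDX (f3));
INSERT INTO t1 VALUES (1,0),(2,0);
INSERT INTO t2 VALUES (1,1),(2,2);
EXPLAIN UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1);
-- the DEPENDENT SUBQUERY row is expected to show type=ref, key=IDX, ref=test.t1.f1
DROP TABLE t1, t2;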
diff --git a/mysql-test/main/myisam_icp.result b/mysql-test/main/myisam_icp.result
index 2bd9ac9ba95..2ffd2d9ad4b 100644
--- a/mysql-test/main/myisam_icp.result
+++ b/mysql-test/main/myisam_icp.result
@@ -448,9 +448,10 @@ c1 INT NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
+insert into t1 select seq,seq from seq_100_to_110;
EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 5 Using where
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 16 Using where
SET SESSION optimizer_switch='index_condition_pushdown=off';
SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
pk c1
@@ -458,6 +459,17 @@ pk c1
2 7
4 3
5 1
+100 100
+101 101
+102 102
+103 103
+104 104
+105 105
+106 106
+107 107
+108 108
+109 109
+110 110
DROP TABLE t1;
set optimizer_switch= @save_optimizer_switch;
#
@@ -675,7 +687,6 @@ DROP TABLE t1;
#
CREATE TABLE t1 (b int NOT NULL, c int, a varchar(1024), PRIMARY KEY (b));
INSERT INTO t1 VALUES (1,4,'Ill');
-insert into t1 select seq+100,5,seq from seq_1_to_100;
CREATE TABLE t2 (a varchar(1024), KEY (a(512)));
INSERT INTO t2 VALUES
('Ill'), ('eckqzsflbzaffti'), ('w'), ('she'), ('gxbwypqtjzwywwer'), ('w');
@@ -685,8 +696,8 @@ EXPLAIN
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL # Using where; Using filesort
-1 SIMPLE t2 ref a a 515 test.t1.a # Using where
+1 SIMPLE t1 system PRIMARY NULL NULL NULL #
+1 SIMPLE t2 ref a a 515 const # Using where
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
@@ -696,8 +707,8 @@ EXPLAIN
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL # Using where; Using filesort
-1 SIMPLE t2 ref a a 515 test.t1.a # Using where
+1 SIMPLE t1 system PRIMARY NULL NULL NULL #
+1 SIMPLE t2 ref a a 515 const # Using where
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
@@ -809,6 +820,8 @@ test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
+set @save_optimizer_where_cost=@@optimizer_where_cost;
+set @@optimizer_where_cost=1;
EXPLAIN
SELECT COUNT(*) FROM t1 AS t, t2
WHERE c = g
@@ -832,6 +845,7 @@ OR a = 0 AND h < 'z' );
COUNT(*)
1478
SET optimizer_switch=@save_optimizer_switch;
+set @@optimizer_where_cost=@save_optimizer_where_cost;
DROP TABLE t1,t2;
# check "Handler_pushed" status varuiables
CREATE TABLE t1 (
@@ -922,7 +936,7 @@ DROP TABLE t1;
# Bug#870046: ICP for a GROUP BY query
#
CREATE TABLE t1 (a int, b varchar(1), c varchar(1), INDEX idx(b));
-INSERT INTO t1 VALUES (2,'x','x'), (5,'x','y');
+INSERT INTO t1 VALUES (2,'x','x'), (5,'x','y'), (6,'a','b'), (7,'a','b');
SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
SELECT a, MIN(c) FROM t1 WHERE b = 'x' AND c > 'x' GROUP BY a;
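The myisam_icp changes are mostly data-volume adjustments that keep the recorded plans stable: seq_100_to_110 adds eleven rows so the full scan of t1 is estimated at 16 rows instead of 5, removing the seq_1_to_100 insert leaves the other t1 with a single row so it collapses to a "system" table in the join plan, optimizer_where_cost is temporarily lowered to 1 around the materialization test, and two extra rows are added to the GROUP BY/ICP case. The sequence inserts need the sequence engine; a self-contained sketch of the pattern, assuming pk is an INT primary key as the hunk context suggests:

--source include/have_sequence.inc
CREATE TABLE t1 (pk INT, c1 INT NOT NULL, PRIMARY KEY (pk));
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
# eleven more rows, pk and c1 both 100..110
INSERT INTO t1 SELECT seq, seq FROM seq_100_to_110;
# save/restore pattern used around the materialization test above
SET @save_optimizer_where_cost=@@optimizer_where_cost;
SET @@optimizer_where_cost=1;
EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 OR pk>3);
SET @@optimizer_where_cost=@save_optimizer_where_cost;
DROP TABLE t1;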
diff --git a/mysql-test/main/myisam_icp.test b/mysql-test/main/myisam_icp.test
index a115d0f7caa..7b4ff23b50e 100644
--- a/mysql-test/main/myisam_icp.test
+++ b/mysql-test/main/myisam_icp.test
@@ -215,7 +215,7 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (a int, b varchar(1), c varchar(1), INDEX idx(b));
-INSERT INTO t1 VALUES (2,'x','x'), (5,'x','y');
+INSERT INTO t1 VALUES (2,'x','x'), (5,'x','y'), (6,'a','b'), (7,'a','b');
SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
diff --git a/mysql-test/main/myisam_mrr,32bit.rdiff b/mysql-test/main/myisam_mrr,32bit.rdiff
index 746bf48dedd..eda77abbfce 100644
--- a/mysql-test/main/myisam_mrr,32bit.rdiff
+++ b/mysql-test/main/myisam_mrr,32bit.rdiff
@@ -1,5 +1,5 @@
---- main/myisam_mrr.result 2019-05-14 15:44:52.232663568 +0530
-+++ main/myisam_mrr.reject 2019-05-14 15:51:37.123563538 +0530
+--- main/myisam_mrr.result
++++ main/myisam_mrr.reject
@@ -617,8 +617,8 @@
show status like 'handler_mrr%';
Variable_name Value
diff --git a/mysql-test/main/myisam_mrr.result b/mysql-test/main/myisam_mrr.result
index b758b2b3258..6bf72e688bc 100644
--- a/mysql-test/main/myisam_mrr.result
+++ b/mysql-test/main/myisam_mrr.result
@@ -517,7 +517,7 @@ table3.col_varchar_key = table2.col_varchar_nokey AND
table3.pk<>0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE table2 ALL col_varchar_key NULL NULL NULL 40 Using where
-1 SIMPLE table3 ref PRIMARY,col_varchar_key col_varchar_key 3 test.table2.col_varchar_key 5 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE table3 ALL PRIMARY,col_varchar_key NULL NULL NULL 40 Using where; Using join buffer (flat, BNL join)
set join_cache_level= @save_join_cache_level;
set join_buffer_size= @save_join_buffer_size;
drop table t1;
@@ -572,8 +572,7 @@ Handler_mrr_rowid_refills 0
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, filler char(200), key(a));
-insert into t1
-select A.a+10*B.a+100*C.a+1000*D.a, 123,'filler' from t0 A, t0 B, t0 C, t0 D;
+insert into t1 select seq, 123, 'filler' from seq_0_to_14999;
explain select sum(b) from t1 where a < 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 9 Using index condition; Rowid-ordered scan
diff --git a/mysql-test/main/myisam_mrr.test b/mysql-test/main/myisam_mrr.test
index 11c9aa64ef1..601844ab385 100644
--- a/mysql-test/main/myisam_mrr.test
+++ b/mysql-test/main/myisam_mrr.test
@@ -1,6 +1,8 @@
#
# MRR/MyISAM tests.
#
+--source include/have_sequence.inc
+
--disable_warnings
drop table if exists t0, t1, t2, t3;
@@ -278,8 +280,7 @@ show status like 'Handler_mrr%';
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, filler char(200), key(a));
-insert into t1
-select A.a+10*B.a+100*C.a+1000*D.a, 123,'filler' from t0 A, t0 B, t0 C, t0 D;
+insert into t1 select seq, 123, 'filler' from seq_0_to_14999;
explain select sum(b) from t1 where a < 10;
--echo # This should show one MRR scan and no re-fills:
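The myisam_mrr data load switches from a four-way cross join of t0 (10 x 10 x 10 x 10 = 10,000 generated rows) to a single scan of seq_0_to_14999 (15,000 rows), which is why the test now sources include/have_sequence.inc. For the a < 10 range both populations contain exactly ten matching rows, so the recorded range plan is unaffected; the sequence version is simply cheaper to build. The new load in full, using the CREATE TABLE already shown in the hunk:

--source include/have_sequence.inc
CREATE TABLE t1 (a INT, b INT, filler CHAR(200), KEY(a));
INSERT INTO t1 SELECT seq, 123, 'filler' FROM seq_0_to_14999;
-- expected: range scan on key a, Using index condition; Rowid-ordered scan
EXPLAIN SELECT SUM(b) FROM t1 WHERE a < 10;
DROP TABLE t1;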
diff --git a/mysql-test/main/mysql.result b/mysql-test/main/mysql.result
index 0b91b513b17..1792f09d3ac 100644
--- a/mysql-test/main/mysql.result
+++ b/mysql-test/main/mysql.result
@@ -637,3 +637,6 @@ drop table t1;
WARNING: option '--enable-cleartext-plugin' is obsolete.
1
1
+#
+# MDEV-30327 Client crashes in print_last_query_cost
+#
diff --git a/mysql-test/main/mysql.test b/mysql-test/main/mysql.test
index 0f41add821a..5aa130f8a4c 100644
--- a/mysql-test/main/mysql.test
+++ b/mysql-test/main/mysql.test
@@ -709,10 +709,19 @@ drop table t1;
--echo #
--exec $MYSQL -NHe "select 1 as a"
-
#
# Test obsolete option --enable-cleartext-plugin
# This should proceed with a warning
#
--echo
--exec $MYSQL test --enable-cleartext-plugin -e "select 1"
+
+--echo #
+--echo # MDEV-30327 Client crashes in print_last_query_cost
+--echo #
+
+--disable_query_log
+--disable_result_log
+--exec $MYSQL --show-query-costs --port=$MASTER_MYPORT -e "show tables in mysql like 'foo'"
+--enable_result_log
+--enable_query_log
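The new mysql.test case drives the client with --show-query-costs against a statement whose result set is empty (the mysql schema has no table literally named 'foo'), which is the invocation MDEV-30327 reports as crashing in print_last_query_cost; query and result logging are disabled because only the absence of a crash is being checked. Stripped of the mtr bookkeeping, the reproduction is the single line below (the port of a locally running server is assumed):

--exec $MYSQL --show-query-costs --port=$MASTER_MYPORT -e "show tables in mysql like 'foo'"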
diff --git a/mysql-test/main/mysql_client_test.result b/mysql-test/main/mysql_client_test.result
index dbc1feaa23b..e32053e50f3 100644
--- a/mysql-test/main/mysql_client_test.result
+++ b/mysql-test/main/mysql_client_test.result
@@ -130,7 +130,7 @@ mysql_stmt_next_result(): 0; field_count: 0
# cat MYSQL_TMP_DIR/test_mdev26145.out.log
# ------------------------------------
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def MAX(a) MAX(a) 3 11 0 Y 32768 0 63
+def MAX(a) MAX(a) 3 11 0 Y 49152 0 63
# ------------------------------------
diff --git a/mysql-test/main/mysql_json_mysql_upgrade.test b/mysql-test/main/mysql_json_mysql_upgrade.test
index 4380b004c70..8271530a1e0 100644
--- a/mysql-test/main/mysql_json_mysql_upgrade.test
+++ b/mysql-test/main/mysql_json_mysql_upgrade.test
@@ -26,7 +26,7 @@ let $MYSQLD_DATADIR= `select @@datadir`;
show create table mysql_json_test;
--exec $MYSQL_UPGRADE --force 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
show create table mysql_json_test;
select * from mysql_json_test;
diff --git a/mysql-test/main/mysql_json_mysql_upgrade_with_plugin_loaded.test b/mysql-test/main/mysql_json_mysql_upgrade_with_plugin_loaded.test
index f3e9c2e539a..da730ba8f8c 100644
--- a/mysql-test/main/mysql_json_mysql_upgrade_with_plugin_loaded.test
+++ b/mysql-test/main/mysql_json_mysql_upgrade_with_plugin_loaded.test
@@ -26,7 +26,7 @@ let $MYSQLD_DATADIR= `select @@datadir`;
show create table mysql_json_test;
--exec $MYSQL_UPGRADE --force 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
show create table mysql_json_test;
show create table mysql_json_test_big;
diff --git a/mysql-test/main/mysql_upgrade-20228.test b/mysql-test/main/mysql_upgrade-20228.test
index 32c05fd1245..99e8914b737 100644
--- a/mysql-test/main/mysql_upgrade-20228.test
+++ b/mysql-test/main/mysql_upgrade-20228.test
@@ -40,8 +40,8 @@ WHERE TABLE_SCHEMA='mysql' AND TABLE_NAME='user';
--echo # Running mysql_upgrade
--exec $MYSQL_UPGRADE --default-character-set=utf8mb4 --force 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo #
--echo # Restoring character_set_client and collation_connection back
diff --git a/mysql-test/main/mysql_upgrade-6984.test b/mysql-test/main/mysql_upgrade-6984.test
index 034310e036f..f46c122cf02 100644
--- a/mysql-test/main/mysql_upgrade-6984.test
+++ b/mysql-test/main/mysql_upgrade-6984.test
@@ -14,7 +14,7 @@
update mysql.global_priv set priv=json_set(priv, '$.plugin', 'mysql_native_password', '$.authentication_string', password('foo')) where user='root';
---replace_regex /[^ ]*mysql_upgrade_info/...mysql_upgrade_info/
+--replace_regex /[^ ]*mariadb_upgrade_info/...mariadb_upgrade_info/
--exec $MYSQL_UPGRADE
connect(con1,localhost,root,foo,,,);
@@ -25,7 +25,7 @@ flush privileges;
set global event_scheduler=OFF;
let MYSQLD_DATADIR= `select @@datadir`;
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
# --skip-grant-tables state may changed during the test. Need to restart the server
# to restore the --skip-grant-tables state. Otherwise MTR's internal check will fail
diff --git a/mysql-test/main/mysql_upgrade.result b/mysql-test/main/mysql_upgrade.result
index f119937bf59..0b1ec84c592 100644
--- a/mysql-test/main/mysql_upgrade.result
+++ b/mysql-test/main/mysql_upgrade.result
@@ -150,8 +150,8 @@ test
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
Run it again - should say already completed
-This installation of MariaDB is already upgraded to VERSION.There is no need to run mysql_upgrade again for VERSION.
-You can use --force if you still want to run mysql_upgrade
+This installation of MariaDB is already upgraded to VERSION.There is no need to run mariadb-upgrade again for VERSION.
+You can use --force if you still want to run mariadb-upgrade
Force should run it regardless of whether it has been run before
Phase 1/7: Checking and upgrading mysql database
Processing databases
@@ -975,7 +975,7 @@ OK
# Bug#11827359 60223: MYSQL_UPGRADE PROBLEM WITH OPTION
# SKIP-WRITE-BINLOG
#
-# Droping the previously created mysql_upgrade_info file..
+# Droping the previously created mariadb_upgrade_info file..
# Running mysql_upgrade with --skip-write-binlog..
Phase 1/7: Checking and upgrading mysql database
Processing databases
@@ -1323,7 +1323,7 @@ length(table_name)
drop table extralongname_extralongname_extralongname_extralongname_ext;
# End of 10.0 tests
set sql_mode=default;
-# Droping the previously created mysql_upgrade_info file..
+# Droping the previously created mariadb_upgrade_info file..
create table test.t1(a int) engine=MyISAM;
# Trying to enforce InnoDB for all tables
SET GLOBAL enforce_storage_engine=InnoDB;
@@ -1896,24 +1896,24 @@ FLUSH PRIVILEGES;
# MDEV-27279: mariadb_upgrade add --check-if-upgrade-is-needed
#
This installation of MariaDB is already upgraded to MariaDB .
-There is no need to run mysql_upgrade again for MariaDB .
+There is no need to run mariadb-upgrade again for MariaDB .
Looking for 'mariadb' as: mariadb
This installation of MariaDB is already upgraded to MariaDB .
-There is no need to run mysql_upgrade again for MariaDB .
+There is no need to run mariadb-upgrade again for MariaDB .
#
# MDEV-27279: mariadb_upgrade check-if-upgrade absence is do it
#
Looking for 'mariadb' as: mariadb
-Empty or non existent ...mysql_upgrade_info. Assuming mysql_upgrade has to be run!
+Empty or non existent ...mariadb_upgrade_info. Assuming mysql_upgrade has to be run!
#
# MDEV-27279: mariadb_upgrade check-if-upgrade with minor version change
#
Looking for 'mariadb' as: mariadb
This installation of MariaDB is already upgraded to MariaDB .
-There is no need to run mysql_upgrade again for MariaDB .
+There is no need to run mariadb-upgrade again for MariaDB .
This installation of MariaDB is already upgraded to MariaDB .
-There is no need to run mysql_upgrade again for MariaDB .
-You can use --force if you still want to run mysql_upgrade
+There is no need to run mariadb-upgrade again for MariaDB .
+You can use --force if you still want to run mariadb-upgrade
#
# MDEV-27279: mariadb_upgrade check-if-upgrade with major version change
#
diff --git a/mysql-test/main/mysql_upgrade.test b/mysql-test/main/mysql_upgrade.test
index b9ff05401f7..8d2a3334d7c 100644
--- a/mysql-test/main/mysql_upgrade.test
+++ b/mysql-test/main/mysql_upgrade.test
@@ -7,6 +7,19 @@
set sql_mode="";
call mtr.add_suppression("Incorrect definition of table mysql.column_stats:.*");
+
+# It should create a file in the MySQL Servers datadir
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+#
+# MDEV-30498 Rename mysql_upgrade state file to mariadb_upgrade.
+#
+# Ensure that old $MYSQLD_DATADIR/mysql_upgrade_info file is deleted.
+#
+
+write_file $MYSQLD_DATADIR/mysql_upgrade_info;
+EOF
+
#
# Basic test that we can run mysql_upgrde and that it finds the
# expected binaries it uses.
@@ -14,22 +27,22 @@ call mtr.add_suppression("Incorrect definition of table mysql.column_stats:.*");
--echo Run mysql_upgrade once
--exec $MYSQL_UPGRADE --force 2>&1
-# It should have created a file in the MySQL Servers datadir
-let $MYSQLD_DATADIR= `select @@datadir`;
+--error 1
file_exists $MYSQLD_DATADIR/mysql_upgrade_info;
+file_exists $MYSQLD_DATADIR/mariadb_upgrade_info;
--echo Run it again - should say already completed
--replace_regex /upgraded to [^\n].*/upgraded to VERSION./ /again for [^\n]*/again for VERSION./
--exec $MYSQL_UPGRADE 2>&1
# It should have created a file in the MySQL Servers datadir
-file_exists $MYSQLD_DATADIR/mysql_upgrade_info;
+file_exists $MYSQLD_DATADIR/mariadb_upgrade_info;
--echo Force should run it regardless of whether it has been run before
--exec $MYSQL_UPGRADE --force 2>&1
# It should have created a file in the MySQL Servers datadir
-file_exists $MYSQLD_DATADIR/mysql_upgrade_info;
+file_exists $MYSQLD_DATADIR/mariadb_upgrade_info;
#
@@ -123,16 +136,16 @@ DROP USER 'user3'@'%';
let $MYSQLD_DATADIR= `select @@datadir`;
---echo # Droping the previously created mysql_upgrade_info file..
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--echo # Droping the previously created mariadb_upgrade_info file..
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo # Running mysql_upgrade with --skip-write-binlog..
---replace_regex /[^ ]*mysql_upgrade_info/...mysql_upgrade_info/
+--replace_regex /[^ ]*mariadb_upgrade_info/...mariadb_upgrade_info/
--exec $MYSQL_UPGRADE --skip-write-binlog
-# mysql_upgrade must have created mysql_upgrade_info file,
+# mysql_upgrade must have created mariadb_upgrade_info file,
# so the following command should never fail.
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo #
--echo # Bug #21489398: MYSQL_UPGRADE: FATAL ERROR: UPGRADE FAILED - IMPROVE ERROR
@@ -201,8 +214,8 @@ set sql_mode=default;
#
# Enforce storage engine option should not effect mysql_upgrade
#
---echo # Droping the previously created mysql_upgrade_info file..
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--echo # Droping the previously created mariadb_upgrade_info file..
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
create table test.t1(a int) engine=MyISAM;
--echo # Trying to enforce InnoDB for all tables
@@ -215,9 +228,9 @@ SET GLOBAL enforce_storage_engine=InnoDB;
SELECT count(*) FROM information_schema.tables where ENGINE="InnoDB";
SHOW CREATE TABLE test.t1;
DROP TABLE test.t1;
-# mysql_upgrade must have created mysql_upgrade_info file,
+# mysql_upgrade must have created mariadb_upgrade_info file,
# so the following command should never fail.
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
SET GLOBAL enforce_storage_engine=NULL;
--echo # End of 10.1 tests
@@ -300,11 +313,11 @@ FLUSH PRIVILEGES;
--echo # MDEV-27279: mariadb_upgrade check-if-upgrade absence is do it
--echo #
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
---replace_regex /[^ ]*mysql_upgrade_info/...mysql_upgrade_info/
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
+--replace_regex /[^ ]*mariadb_upgrade_info/...mariadb_upgrade_info/
--exec $MYSQL_UPGRADE --check-if-upgrade-is-needed
---replace_regex /'mariadb.* as:[^\n]*/'mariadb' as: mariadb/ /open .* Assuming/open XXX. Assuming/ /[^ ]*mysql_upgrade_info/...mysql_upgrade_info/
+--replace_regex /'mariadb.* as:[^\n]*/'mariadb' as: mariadb/ /open .* Assuming/open XXX. Assuming/ /[^ ]*mariadb_upgrade_info/...mariadb_upgrade_info/
--exec $MYSQL_UPGRADE --check-if-upgrade-is-needed --verbose
--echo #
@@ -319,7 +332,7 @@ perl;
my $ver= $ENV{'MYSQL_SERVER_VERSION'} or die "MYSQL_SERVER_VERSION not set";
my $file= $ENV{'DATADIR'} or die "MYSQLD_DATADIR not set";
$ver =~ s/^(\d*)\.(\d*).(\d*)(.*)/$1.$2.0$4/;
- open(FILE, ">$file/mysql_upgrade_info") or die "Failed to open $file";
+ open(FILE, ">$file/mariadb_upgrade_info") or die "Failed to open $file";
print FILE "$ver\n";
close(FILE);
EOF
@@ -331,21 +344,29 @@ EOF
--exec $MYSQL_UPGRADE --check-if-upgrade-is-needed --verbose
--replace_regex /\d\d\.\d*\.\d*[^ .\n]*/MariaDB /
--exec $MYSQL_UPGRADE
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo #
--echo # MDEV-27279: mariadb_upgrade check-if-upgrade with major version change
--echo #
-# take 2rd number of version and change to 0
-
+# take 2rd number of version and change to 0. If the 2rd number is already 0,
+# change the first number
let DATADIR= $MYSQLD_DATADIR;
perl;
my $ver= $ENV{'MYSQL_SERVER_VERSION'} or die "MYSQL_SERVER_VERSION not set";
my $file= $ENV{'DATADIR'} or die "MYSQLD_DATADIR not set";
- $ver =~ s/^(\d*)\.(\d*).(\d*)(.*)/$1.0.$3$4/;
- open(FILE, ">$file/mysql_upgrade_info") or die "Failed to open $file";
+ open(FILE, ">$file/mariadb_upgrade_info") or die "Failed to open $file";
+ if ($ver =~ m/(\d*)\.0\.(\d*)(.*)/)
+ {
+ my $prev= $1-1;
+ $ver= $prev . '.0.' . $2 . $3;
+ }
+ else
+ {
+ $ver =~ s/^(\d*)\.(\d*)\.(\d*)(.*)/$1.0.$3$4/;
+ }
print FILE "$ver\n";
close(FILE);
EOF
@@ -355,7 +376,7 @@ EOF
--exec $MYSQL_UPGRADE --check-if-upgrade-is-needed
--replace_regex /\d\d\.\d*\.\d*[^ .\n]*/MariaDB / /'mariadb.* as:[^\n]*/'mysql' as: mysql/
--exec $MYSQL_UPGRADE --check-if-upgrade-is-needed --verbose
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
drop table mysql.global_priv;
rename table mysql.global_priv_bak to mysql.global_priv;
@@ -376,7 +397,7 @@ alter table mysql.db drop column Delete_history_priv;
--source include/restart_mysqld.inc
--echo Run mysql_upgrade with all privileges on a user
--exec $MYSQL_UPGRADE --force --silent 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
flush privileges;
SHOW GRANTS FOR 'user3'@'%';
DROP USER 'user3'@'%';
@@ -395,7 +416,7 @@ SHOW GLOBAL VARIABLES LIKE 'alter_algorithm';
--exec $MYSQL_UPGRADE --force 2>&1
SET GLOBAL alter_algorithm=DEFAULT;
SHOW GLOBAL VARIABLES LIKE 'alter_algorithm';
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo End of 10.3 tests
@@ -413,7 +434,7 @@ disconnect con1;
connection default;
--echo # mysql_upgrade --force --silent 2>&1
--exec $MYSQL_UPGRADE --force --silent 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
show create user user3@localhost;
connect con1,localhost,user3,a_password;
select current_user();
@@ -431,7 +452,7 @@ drop view mysql.user_bak;
drop table mysql.innodb_index_stats, mysql.innodb_table_stats;
--echo # mysql_upgrade --force --silent 2>&1
--exec $MYSQL_UPGRADE --force --silent 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
drop table mysql.global_priv;
rename table mysql.global_priv_bak to mysql.global_priv;
@@ -443,7 +464,7 @@ drop view mysql.user_bak;
alter table mysql.user change authentication_string auth_string text collate utf8_bin not null;
--echo # mysql_upgrade --force --silent 2>&1
--exec $MYSQL_UPGRADE --force --silent 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
select count(*) from mysql.global_priv;
drop table mysql.global_priv;
rename table mysql.global_priv_bak to mysql.global_priv;
@@ -484,7 +505,7 @@ connection default;
drop table mysql.global_priv;
rename table mysql.global_priv_bak to mysql.global_priv;
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo # End of 10.4 tests
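Taken together, the mysql_upgrade hunks track MDEV-30498: the upgrade state file is now written as mariadb_upgrade_info, the user-facing messages refer to mariadb-upgrade, and every --remove_file/--replace_regex in the tests is renamed to match. The test also seeds an empty mysql_upgrade_info up front to prove that the obsolete file is removed. The post-conditions it enforces boil down to the following (mtr syntax, datadir taken from @@datadir as in the test):

let $MYSQLD_DATADIR= `select @@datadir`;
# seed the obsolete state file so the upgrade has something to clean up
write_file $MYSQLD_DATADIR/mysql_upgrade_info;
EOF
--exec $MYSQL_UPGRADE --force 2>&1
# the old state file must be gone ...
--error 1
file_exists $MYSQLD_DATADIR/mysql_upgrade_info;
# ... and the new one must have been created
file_exists $MYSQLD_DATADIR/mariadb_upgrade_info;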
diff --git a/mysql-test/main/mysql_upgrade_mysql_json_datatype.test b/mysql-test/main/mysql_upgrade_mysql_json_datatype.test
index 13d8ff5754a..f0f20681beb 100644
--- a/mysql-test/main/mysql_upgrade_mysql_json_datatype.test
+++ b/mysql-test/main/mysql_upgrade_mysql_json_datatype.test
@@ -58,4 +58,4 @@ drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
diff --git a/mysql-test/main/mysql_upgrade_no_innodb.test b/mysql-test/main/mysql_upgrade_no_innodb.test
index 8813a450450..bac155f8129 100644
--- a/mysql-test/main/mysql_upgrade_no_innodb.test
+++ b/mysql-test/main/mysql_upgrade_no_innodb.test
@@ -3,4 +3,4 @@
--exec $MYSQL_UPGRADE --force --upgrade-system-tables 2>&1
let $MYSQLD_DATADIR= `select @@datadir`;
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
diff --git a/mysql-test/main/mysql_upgrade_noengine.result b/mysql-test/main/mysql_upgrade_noengine.result
index 459a1a6ce34..830b574fb9c 100644
--- a/mysql-test/main/mysql_upgrade_noengine.result
+++ b/mysql-test/main/mysql_upgrade_noengine.result
@@ -11,7 +11,7 @@ table_name t1
table_type BASE TABLE
engine BLACKHOLE
row_format Fixed
-table_rows 0
+table_rows 2
data_length 0
table_comment
select table_catalog, table_schema, table_name, table_type, engine, row_format, table_rows, data_length, table_comment from information_schema.tables where table_schema='test' and table_name='t2';
@@ -610,7 +610,7 @@ table_name t1
table_type BASE TABLE
engine BLACKHOLE
row_format Fixed
-table_rows 0
+table_rows 2
data_length 0
table_comment
select table_catalog, table_schema, table_name, table_type, engine, row_format, table_rows, data_length, table_comment from information_schema.tables where table_schema='test' and table_name='t2';
diff --git a/mysql-test/main/mysql_upgrade_noengine.test b/mysql-test/main/mysql_upgrade_noengine.test
index a2d229dc9be..0fa9414ecb5 100644
--- a/mysql-test/main/mysql_upgrade_noengine.test
+++ b/mysql-test/main/mysql_upgrade_noengine.test
@@ -46,9 +46,9 @@ drop view mysql.user_bak;
# pretend it's an upgrade from 10.0
alter table mysql.user drop column default_role, drop column max_statement_time;
-# but mysql_upgrade_info tells otherwise
-remove_file $datadir/mysql_upgrade_info;
-write_file $datadir/mysql_upgrade_info;
+# but mariadb_upgrade_info tells otherwise
+remove_file $datadir/mariadb_upgrade_info;
+write_file $datadir/mariadb_upgrade_info;
10.1.10-MariaDB
EOF
@@ -64,7 +64,7 @@ source include/switch_to_mysql_user.inc;
drop view mysql.user_bak;
alter table mysql.user drop column default_role, drop column max_statement_time;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
--echo # upgrade from 10.0 - engines are enabled
--replace_regex /\d\d\.\d*\.\d*[^ .\n]*/MariaDB /
@@ -74,7 +74,7 @@ select table_catalog, table_schema, table_name, table_type, engine, row_format,
drop table t1, t2;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
uninstall plugin blackhole;
uninstall plugin archive;
diff --git a/mysql-test/main/mysql_upgrade_ssl.test b/mysql-test/main/mysql_upgrade_ssl.test
index daf5fad3df7..678d429b6d0 100644
--- a/mysql-test/main/mysql_upgrade_ssl.test
+++ b/mysql-test/main/mysql_upgrade_ssl.test
@@ -9,4 +9,4 @@
--echo #
--exec $MYSQL_UPGRADE --skip-verbose --skip-silent --ssl --force 2>&1
--let $datadir= `select @@datadir`
---remove_file $datadir/mysql_upgrade_info
+--remove_file $datadir/mariadb_upgrade_info
diff --git a/mysql-test/main/mysql_upgrade_to_100502.result b/mysql-test/main/mysql_upgrade_to_100502.result
index 7235b16aa5a..54f4d273c5e 100644
--- a/mysql-test/main/mysql_upgrade_to_100502.result
+++ b/mysql-test/main/mysql_upgrade_to_100502.result
@@ -13,12 +13,12 @@ CREATE USER user_super@localhost;
GRANT SUPER ON *.* TO user_super@localhost;
SHOW GRANTS FOR user_super@localhost;
Grants for user_super@localhost
-GRANT SUPER, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `user_super`@`localhost`
+GRANT SUPER, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_super`@`localhost`
CREATE USER user_super_replslave@localhost;
GRANT SUPER, REPLICATION SLAVE ON *.* TO user_super_replslave@localhost;
SHOW GRANTS FOR user_super_replslave@localhost;
Grants for user_super_replslave@localhost
-GRANT SUPER, REPLICATION SLAVE, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_super_replslave`@`localhost`
+GRANT SUPER, REPLICATION SLAVE, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_super_replslave`@`localhost`
#
# MDEV-23610: Slave user can't run "SHOW SLAVE STATUS" anymore after upgrade to 10.5, mysql_upgrade should take of that
#
@@ -30,7 +30,7 @@ CREATE USER user_replsuper@localhost;
GRANT SUPER ON *.* TO user_replsuper@localhost;
SHOW GRANTS FOR user_replsuper@localhost;
Grants for user_replsuper@localhost
-GRANT SUPER, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `user_replsuper`@`localhost`
+GRANT SUPER, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_replsuper`@`localhost`
#
# Users with privilege REPLICATION CLIENT prior to 10.5 should successfully execute
# SHOW SLAVE STATUS command
@@ -62,13 +62,13 @@ GRANT ALL PRIVILEGES ON *.* TO `user_all`@`localhost` WITH GRANT OPTION
#
SHOW GRANTS FOR user_super@localhost;
Grants for user_super@localhost
-GRANT SUPER, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `user_super`@`localhost`
+GRANT SUPER, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_super`@`localhost`
#
# Should automatically get all new 10.5.2 priveleges that were splitted from SUPER, plus REPLICATION MASTER ADMIN
#
SHOW GRANTS FOR user_super_replslave@localhost;
Grants for user_super_replslave@localhost
-GRANT SUPER, REPLICATION SLAVE, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_super_replslave`@`localhost`
+GRANT SUPER, REPLICATION SLAVE, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `user_super_replslave`@`localhost`
#
# MDEV-23610: Slave user can't run "SHOW SLAVE STATUS" anymore after upgrade to 10.5, mysql_upgrade should take of that
#
diff --git a/mysql-test/main/mysql_upgrade_to_100502.test b/mysql-test/main/mysql_upgrade_to_100502.test
index fc47f0c94aa..a5bda5b6546 100644
--- a/mysql-test/main/mysql_upgrade_to_100502.test
+++ b/mysql-test/main/mysql_upgrade_to_100502.test
@@ -55,7 +55,7 @@ SHOW GRANTS FOR user_replslave@localhost;
--echo # mysql_upgrade --force --silent 2>&1
--exec $MYSQL_UPGRADE --force --silent 2>&1
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
FLUSH PRIVILEGES;
--echo #
diff --git a/mysql-test/main/mysql_upgrade_view.test b/mysql-test/main/mysql_upgrade_view.test
index d3d955e7cae..4cff5780ce2 100644
--- a/mysql-test/main/mysql_upgrade_view.test
+++ b/mysql-test/main/mysql_upgrade_view.test
@@ -185,6 +185,6 @@ flush tables;
# back to mariadb default
drop table mysql.event;
rename table mysql.ev_bk to mysql.event;
-remove_file $MYSQLD_DATADIR/mysql_upgrade_info;
+remove_file $MYSQLD_DATADIR/mariadb_upgrade_info;
drop view v1,v2,v3;
drop table t1;
diff --git a/mysql-test/main/mysqlbinlog.result b/mysql-test/main/mysqlbinlog.result
index f7c7b2c677e..f940a271260 100644
--- a/mysql-test/main/mysqlbinlog.result
+++ b/mysql-test/main/mysqlbinlog.result
@@ -407,16 +407,26 @@ ROLLBACK /* added by mysqlbinlog */;
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
DELIMITER /*!*/;
ROLLBACK/*!*/;
-SET TIMESTAMP=1108844556/*!*/;
+use `test`/*!*/;
+SET TIMESTAMP=1140641973/*!*/;
SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.system_versioning_insert_history=0/*!*/;
+SET @@session.sql_mode=#/*!*/;
SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=#/*!*/;
SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
-BEGIN
+CREATE TABLE t1(c INT)
/*!*/;
-use `test`/*!*/;
-SET TIMESTAMP=1108844555/*!*/;
-insert t1 values (1)
+SET TIMESTAMP=1140641985/*!*/;
+CREATE TABLE t2(s CHAR(200))
+/*!*/;
+SET TIMESTAMP=1140642018/*!*/;
+CREATE TRIGGER trg1 AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES(CURRENT_USER())
+/*!*/;
+SET TIMESTAMP=1140642025/*!*/;
+INSERT INTO t1 VALUES(1)
/*!*/;
DELIMITER ;
# End of log file
@@ -427,16 +437,21 @@ ROLLBACK /* added by mysqlbinlog */;
/*!40019 SET @@session.max_delayed_threads=0*/;
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
DELIMITER /*!*/;
-SET TIMESTAMP=1108844556/*!*/;
+ROLLBACK/*!*/;
+use `test`/*!*/;
+SET TIMESTAMP=1140642018/*!*/;
SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.system_versioning_insert_history=0/*!*/;
+SET @@session.sql_mode=#/*!*/;
SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=#/*!*/;
SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
-BEGIN
+CREATE TRIGGER trg1 AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES(CURRENT_USER())
/*!*/;
-use `test`/*!*/;
-SET TIMESTAMP=1108844555/*!*/;
-insert t1 values (1)
+SET TIMESTAMP=1140642025/*!*/;
+INSERT INTO t1 VALUES(1)
/*!*/;
DELIMITER ;
# End of log file
@@ -1269,7 +1284,7 @@ DELIMITER ;
ROLLBACK /* added by mysqlbinlog */;
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
-mariadb-binlog Ver VER for OS at ARCH
+mariadb-binlog from #.#.#-MariaDB, client #.# for OS (ARCH)
#
# Test --rewrite-db
#
diff --git a/mysql-test/main/mysqlbinlog.test b/mysql-test/main/mysqlbinlog.test
index b12709583e4..430cdb708cd 100644
--- a/mysql-test/main/mysqlbinlog.test
+++ b/mysql-test/main/mysqlbinlog.test
@@ -123,13 +123,13 @@ select "--- reading stdin --" as "";
--enable_query_log
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
--replace_regex /SQL_LOAD_MB-[0-9a-f]+-[0-9a-f]+/SQL_LOAD_MB-#-#/ /@@session.sql_mode=\d+/@@session.sql_mode=#/ /collation_server=\d+/collation_server=#/
---exec $MYSQL_BINLOG --short-form - < $MYSQL_TEST_DIR/std_data/trunc_binlog.000001
+--exec $MYSQL_BINLOG --short-form - < $MYSQL_TEST_DIR/std_data/bug16266.000001
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
--replace_regex /SQL_LOAD_MB-[0-9a-f]+-[0-9a-f]+/SQL_LOAD_MB-#-#/ /@@session.sql_mode=\d+/@@session.sql_mode=#/ /collation_server=\d+/collation_server=#/
# postion is constant to correspond to an event in pre-recorded binlog
---let $binlog_start_pos=79
---exec $MYSQL_BINLOG --short-form --start-position=$binlog_start_pos - < $MYSQL_TEST_DIR/std_data/trunc_binlog.000001
+--let $binlog_start_pos=274
+--exec $MYSQL_BINLOG --short-form --start-position=$binlog_start_pos - < $MYSQL_TEST_DIR/std_data/bug16266.000001
drop table t1,t2;
@@ -608,7 +608,7 @@ eval SET GLOBAL SERVER_ID = $old_server_id;
#
# MDEV-12372 mysqlbinlog --version output is the same on 10.x as on 5.5.x, and contains not only version
#
-replace_regex /.*mariadb-binlog(\.exe)? Ver .* for .* at [-_a-zA-Z0-9]+/mariadb-binlog Ver VER for OS at ARCH/;
+replace_regex /for \S+/for OS/ /\d+/#/ /#[-_A-Za-z0-9]*-MariaDB,/#-MariaDB,/ /\(.*\)/(ARCH)/ /^.*binlog(\.exe)?/mariadb-binlog/;
exec $MYSQL_BINLOG --version;
--echo #
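The mysqlbinlog changes do two things: the stdin-reading tests switch from std_data/trunc_binlog.000001 to std_data/bug16266.000001 (with the pre-recorded --start-position moving from 79 to 274 so it still points at a real event), and the MDEV-12372 --version check is updated for the new banner, which now has the shape "mariadb-binlog from #.#.#-MariaDB, client #.# for OS (ARCH)" instead of "mariadb-binlog Ver VER for OS at ARCH". The normalisation added above can be reused as-is when recording similar version output:

replace_regex /for \S+/for OS/ /\d+/#/ /#[-_A-Za-z0-9]*-MariaDB,/#-MariaDB,/ /\(.*\)/(ARCH)/ /^.*binlog(\.exe)?/mariadb-binlog/;
exec $MYSQL_BINLOG --version;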
diff --git a/mysql-test/main/mysqld--help.result b/mysql-test/main/mysqld--help.result
index 5720988df2f..458a2ceaad0 100644
--- a/mysql-test/main/mysqld--help.result
+++ b/mysql-test/main/mysqld--help.result
@@ -508,8 +508,8 @@ The following specify which files/extra groups are read (specified before remain
--log-slow-admin-statements
Log slow OPTIMIZE, ANALYZE, ALTER and other
administrative statements to the slow log if it is open.
- Resets or sets the option 'admin' in
- log_slow_disabled_statements
+ Resets or sets the option 'admin' in log_slow_filter.
+ Deprecated, use log_slow_filter without 'admin'.
(Defaults to on; use --skip-log-slow-admin-statements to disable.)
--log-slow-disabled-statements=name
Don't log certain types of statements to slow log. Any
@@ -720,10 +720,32 @@ The following specify which files/extra groups are read (specified before remain
max_connections*5 or max_connections + table_cache*2
(whichever is larger) number of file descriptors
(Automatically configured unless set explicitly)
+ --optimizer-disk-read-cost=#
+ Cost of reading a block of IO_SIZE (4096) from a disk (in
+ usec).
+ --optimizer-disk-read-ratio=#
+ Chance that we have to do a disk read to find a row or
+ index entry from the engine cache
+ (cache_misses/total_cache_requests). 0.0 means that
+ everything is cached and 1.0 means that nothing is
+ expected to be in the engine cache.
--optimizer-extra-pruning-depth=#
If the optimizer needs to enumerate join prefix of this
size or larger, then it will try agressively prune away
the search space.
+ --optimizer-index-block-copy-cost=#
+ Cost of copying a key block from the cache to intern
+ storage as part of an index scan.
+ --optimizer-key-compare-cost=#
+ Cost of checking a key against the end key condition.
+ --optimizer-key-copy-cost=#
+ Cost of finding the next key in the engine and copying it
+ to the SQL layer.
+ --optimizer-key-lookup-cost=#
+ Cost for finding a key based on a key value
+ --optimizer-key-next-find-cost=#
+ Cost of finding the next key and rowid when using
+ filters.
--optimizer-max-sel-arg-weight=#
The maximum weight of the SEL_ARG graph. Set to 0 for no
limit
@@ -734,6 +756,21 @@ The following specify which files/extra groups are read (specified before remain
heuristic, thus perform exhaustive search: 1 - prune
plans based on cost and number of retrieved rows eq_ref:
2 - prune also if we find an eq_ref chain
+ --optimizer-row-copy-cost=#
+ Cost of copying a row from the engine or the join cache
+ to the SQL layer.
+ --optimizer-row-lookup-cost=#
+ Cost of finding a row based on a rowid or a clustered
+ key.
+ --optimizer-row-next-find-cost=#
+ Cost of finding the next row when scanning the table.
+ --optimizer-rowid-compare-cost=#
+ Cost of comparing two rowid's
+ --optimizer-rowid-copy-cost=#
+ Cost of copying a rowid
+ --optimizer-scan-setup-cost=#
+ Extra cost added to TABLE and INDEX scans to get
+ optimizer to prefer index lookups.
--optimizer-search-depth=#
Maximum depth of search performed by the query optimizer.
Values larger than the number of relations in a query
@@ -786,6 +823,10 @@ The following specify which files/extra groups are read (specified before remain
the cardinality of a partial join.5 - additionally use
selectivity of certain non-range predicates calculated on
record samples
+ --optimizer-where-cost=#
+ Cost of checking the row against the WHERE clause.
+ Increasing this will have the optimizer to prefer plans
+ with less row combinations.
--performance-schema
Enable the performance schema.
--performance-schema-accounts-size=#
@@ -1591,7 +1632,7 @@ gtid-pos-auto-engines
gtid-strict-mode FALSE
help TRUE
histogram-size 254
-histogram-type DOUBLE_PREC_HB
+histogram-type JSON_HB
host-cache-size 279
idle-readonly-transaction-timeout 0
idle-transaction-timeout 0
@@ -1698,15 +1739,29 @@ old-alter-table DEFAULT
old-mode UTF8_IS_UTF8MB3
old-passwords FALSE
old-style-user-limits FALSE
+optimizer-disk-read-cost 10.24
+optimizer-disk-read-ratio 0.02
optimizer-extra-pruning-depth 8
+optimizer-index-block-copy-cost 0.0356
+optimizer-key-compare-cost 0.011361
+optimizer-key-copy-cost 0.015685
+optimizer-key-lookup-cost 0.435777
+optimizer-key-next-find-cost 0.082347
optimizer-max-sel-arg-weight 32000
optimizer-prune-level 2
+optimizer-row-copy-cost 0.060866
+optimizer-row-lookup-cost 0.130839
+optimizer-row-next-find-cost 0.045916
+optimizer-rowid-compare-cost 0.002653
+optimizer-rowid-copy-cost 0.002653
+optimizer-scan-setup-cost 10
optimizer-search-depth 62
optimizer-selectivity-sampling-limit 100
optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
optimizer-trace
optimizer-trace-max-mem-size 1048576
optimizer-use-condition-selectivity 4
+optimizer-where-cost 0.032
performance-schema FALSE
performance-schema-accounts-size -1
performance-schema-consumer-events-stages-current FALSE
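The optimizer cost settings listed above are ordinary system variables, so they can be read and overridden per session or per statement. A minimal sketch, assuming a server built from this tree; the variable names come from the listing above and the EXPLAIN target is purely illustrative:

SELECT @@optimizer_where_cost, @@optimizer_disk_read_ratio, @@optimizer_scan_setup_cost;
SET SESSION optimizer_scan_setup_cost=0;        -- drop the extra scan setup penalty so full scans cost less
SET SESSION optimizer_scan_setup_cost=DEFAULT;
SET STATEMENT optimizer_scan_setup_cost=0 FOR EXPLAIN SELECT 1;  -- per-statement override, as the opt_trace test further below does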
diff --git a/mysql-test/main/mysqld--help.test b/mysql-test/main/mysqld--help.test
index a73562e4249..ad2307f43c6 100644
--- a/mysql-test/main/mysqld--help.test
+++ b/mysql-test/main/mysqld--help.test
@@ -29,6 +29,7 @@ perl;
collation-server character-set-server log-tc-size table-cache
table-open-cache table-open-cache-instances max-connections
tls-version version.* password-reuse-check
+ provider-bzip2 provider-lzma provider-lzo
password-reuse-check-interval/;
# Plugins which may or may not be there:
diff --git a/mysql-test/main/mysqldump.result b/mysql-test/main/mysqldump.result
index 13fc0cc9fe6..2c15a4841eb 100644
--- a/mysql-test/main/mysqldump.result
+++ b/mysql-test/main/mysqldump.result
@@ -2822,8 +2822,6 @@ CREATE PROCEDURE bug9056_proc2(OUT a INT)
BEGIN
select sum(id) from t1 into a;
END //
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set sql_mode='ansi';
create procedure `a'b` () select 1;
set sql_mode='';
@@ -3733,8 +3731,8 @@ reset master;
mariadb-dump: Couldn't execute 'FLUSH /*!40101 LOCAL */ TABLES': Access denied; you need (at least one of) the RELOAD privilege(s) for this operation (1227)
mariadb-dump: Couldn't execute 'FLUSH /*!40101 LOCAL */ TABLES': Access denied; you need (at least one of) the RELOAD privilege(s) for this operation (1227)
grant RELOAD on *.* to mysqltest_1@localhost;
-mariadb-dump: Couldn't execute 'SHOW MASTER STATUS': Access denied; you need (at least one of) the SUPER, BINLOG MONITOR privilege(s) for this operation (1227)
-mariadb-dump: Couldn't execute 'SHOW MASTER STATUS': Access denied; you need (at least one of) the SUPER, BINLOG MONITOR privilege(s) for this operation (1227)
+mariadb-dump: Couldn't execute 'SHOW MASTER STATUS': Access denied; you need (at least one of) the BINLOG MONITOR privilege(s) for this operation (1227)
+mariadb-dump: Couldn't execute 'SHOW MASTER STATUS': Access denied; you need (at least one of) the BINLOG MONITOR privilege(s) for this operation (1227)
grant REPLICATION CLIENT on *.* to mysqltest_1@localhost;
drop table t1;
drop user mysqltest_1@localhost;
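The changed error messages above reflect that BINLOG MONITOR on its own is now enough for the SHOW MASTER STATUS that mariadb-dump issues; a minimal sketch of granting and revoking it, reusing the test account name purely for illustration:

CREATE USER IF NOT EXISTS mysqltest_1@localhost;
GRANT BINLOG MONITOR ON *.* TO mysqltest_1@localhost;   -- sufficient for the SHOW MASTER STATUS that mariadb-dump --master-data runs
SHOW GRANTS FOR mysqltest_1@localhost;
REVOKE BINLOG MONITOR ON *.* FROM mysqltest_1@localhost;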
diff --git a/mysql-test/main/named_pipe.result b/mysql-test/main/named_pipe.result
index 2baa3471ec9..a0738cdad34 100644
--- a/mysql-test/main/named_pipe.result
+++ b/mysql-test/main/named_pipe.result
@@ -1,5 +1,6 @@
connect pipe_con,localhost,root,,,,,PIPE;
drop table if exists t1,t2,t3,t4;
+set @@default_storage_engine="aria";
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
@@ -600,6 +601,9 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some test with ORDER BY and limit
+#
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -1289,7 +1293,7 @@ companynr tinyint(2) unsigned zerofill NOT NULL default '00',
companyname char(30) NOT NULL default '',
PRIMARY KEY (companynr),
UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
+) ENGINE=aria MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
select STRAIGHT_JOIN t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
companynr companyname
00 Unknown
@@ -1379,6 +1383,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
@@ -1393,15 +1400,15 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 and companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1417,11 +1424,11 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/null_key.result b/mysql-test/main/null_key.result
index 6b9d59c636a..cee3484a304 100644
--- a/mysql-test/main/null_key.result
+++ b/mysql-test/main/null_key.result
@@ -181,12 +181,12 @@ insert into t2 values (7),(8);
explain select * from t2 straight_join t1 where t1.a=t2.a and b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
-1 SIMPLE t1 ref a,b a 10 test.t2.a,const 2 Using where; Using index
+1 SIMPLE t1 ref a,b a 10 test.t2.a,const 1 Using where; Using index
drop index b on t1;
explain select * from t2,t1 where t1.a=t2.a and b is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
-1 SIMPLE t1 ref a a 10 test.t2.a,const 2 Using where; Using index
+1 SIMPLE t1 ref a a 10 test.t2.a,const 1 Using where; Using index
select * from t2,t1 where t1.a=t2.a and b is null;
a a b
7 7 NULL
@@ -258,10 +258,11 @@ PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
INSERT INTO t1 VALUES (11,5),(12,6),(13,7),(14,8),(15,9);
+INSERT INTO t1 VALUES (1000,1000),(1010,1010);
INSERT INTO t2 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
explain select id from t1 where uniq_id is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL idx1 NULL NULL NULL 15 Using where
+1 SIMPLE t1 ref idx1 idx1 5 const 6 Using index condition
explain select id from t1 where uniq_id =1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const idx1 idx1 5 const 1
@@ -285,6 +286,7 @@ id
110
DELETE FROM t1 WHERE uniq_id IS NULL;
DELETE FROM t2 WHERE uniq_id IS NULL;
+DELETE FROM t1 WHERE id >= 1000;
SELECT * FROM t1 ORDER BY uniq_id, id;
id uniq_id
3 1
diff --git a/mysql-test/main/null_key.test b/mysql-test/main/null_key.test
index 7eabd6d5dc3..65a93022a2e 100644
--- a/mysql-test/main/null_key.test
+++ b/mysql-test/main/null_key.test
@@ -104,6 +104,7 @@ CREATE TABLE t2 (
INSERT INTO t1 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
INSERT INTO t1 VALUES (11,5),(12,6),(13,7),(14,8),(15,9);
+INSERT INTO t1 VALUES (1000,1000),(1010,1010);
INSERT INTO t2 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
#
# Check IS NULL optimization
@@ -122,6 +123,12 @@ select id from t2 where uniq_id is null;
#
DELETE FROM t1 WHERE uniq_id IS NULL;
DELETE FROM t2 WHERE uniq_id IS NULL;
+
+#
+# Delete extra records that were used to force null optimization
+#
+DELETE FROM t1 WHERE id >= 1000;
+
#
# Select what is left -- notice the difference
#
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index 2d52392dedb..41e2afbd43b 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -118,7 +118,8 @@ select * from v1 {
"table": "t1",
"table_scan": {
"rows": 2,
- "cost": 2.004394531
+ "read_cost": 0.01028441,
+ "read_and_compare_cost": 0.01034841
}
}
]
@@ -126,23 +127,30 @@ select * from v1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 1,
- "cost": 2.204394531,
+ "rows": 2,
+ "rows_after_filter": 1,
+ "rows_out": 1,
+ "cost": 0.01034841,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 1,
- "cost": 2.204394531,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.01034841,
"uses_join_buffering": false
}
}
@@ -150,15 +158,17 @@ select * from v1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 1,
- "cost_for_plan": 2.404394531
+ "cost_for_plan": 0.01034841
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 1,
+ "cost": 0.01034841
},
{
"substitute_best_equal": {
@@ -172,10 +182,13 @@ select * from v1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.a = 1"
+ "attached_condition": "t1.a = 1"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -276,7 +289,8 @@ select * from (select * from t1 where t1.a=1)q {
"table": "t1",
"table_scan": {
"rows": 2,
- "cost": 2.004394531
+ "read_cost": 0.01028441,
+ "read_and_compare_cost": 0.01034841
}
}
]
@@ -284,23 +298,30 @@ select * from (select * from t1 where t1.a=1)q {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 1,
- "cost": 2.204394531,
+ "rows": 2,
+ "rows_after_filter": 1,
+ "rows_out": 1,
+ "cost": 0.01034841,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 1,
- "cost": 2.204394531,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.01034841,
"uses_join_buffering": false
}
}
@@ -308,15 +329,17 @@ select * from (select * from t1 where t1.a=1)q {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 1,
- "cost_for_plan": 2.404394531
+ "cost_for_plan": 0.01034841
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 1,
+ "cost": 0.01034841
},
{
"substitute_best_equal": {
@@ -330,10 +353,13 @@ select * from (select * from t1 where t1.a=1)q {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.a = 1"
+ "attached_condition": "t1.a = 1"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -439,7 +465,8 @@ select * from v2 {
"table": "t1",
"table_scan": {
"rows": 2,
- "cost": 2.004394531
+ "read_cost": 0.01028441,
+ "read_and_compare_cost": 0.01034841
}
}
]
@@ -447,24 +474,31 @@ select * from v2 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 1,
- "cost": 2.204394531,
+ "rows": 2,
+ "rows_after_filter": 1,
+ "rows_out": 1,
+ "cost": 0.01034841,
+ "index_only": false,
"chosen": true,
"use_tmp_table": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 1,
- "cost": 2.204394531,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.01034841,
"uses_join_buffering": false
}
}
@@ -472,16 +506,18 @@ select * from v2 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 1,
- "cost_for_plan": 2.404394531,
- "cost_for_sorting": 1
+ "cost_for_plan": 0.01034841,
+ "cost_for_sorting": 6.301866e-4
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 1,
+ "cost": 0.010978597
},
{
"substitute_best_equal": {
@@ -495,10 +531,13 @@ select * from v2 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.a = 1"
+ "attached_condition": "t1.a = 1"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -519,7 +558,8 @@ select * from v2 {
"table": "<derived2>",
"table_scan": {
"rows": 2,
- "cost": 2
+ "read_cost": 0.012350033,
+ "read_and_compare_cost": 0.012418701
}
}
]
@@ -527,23 +567,30 @@ select * from v2 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "<derived2>",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 2,
- "cost": 2,
+ "rows": 2,
+ "rows_after_filter": 2,
+ "rows_out": 2,
+ "cost": 0.012418701,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 2,
- "cost": 2,
+ "rows_read": 2,
+ "rows_out": 2,
+ "cost": 0.012418701,
"uses_join_buffering": false
}
}
@@ -551,15 +598,17 @@ select * from v2 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "<derived2>",
"rows_for_plan": 2,
- "cost_for_plan": 2.4
+ "cost_for_plan": 0.012418701
}
]
},
{
- "best_join_order": ["<derived2>"]
+ "best_join_order": ["<derived2>"],
+ "rows": 2,
+ "cost": 0.012418701
},
{
"attaching_conditions_to_tables": {
@@ -567,10 +616,13 @@ select * from v2 {
"attached_conditions_summary": [
{
"table": "<derived2>",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -663,7 +715,8 @@ explain select * from v2 {
"table": "t2",
"table_scan": {
"rows": 10,
- "cost": 2.021972656
+ "read_cost": 0.01127965,
+ "read_and_compare_cost": 0.01159965
}
}
]
@@ -671,23 +724,30 @@ explain select * from v2 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "rows": 10,
+ "rows_after_filter": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
"uses_join_buffering": false
}
}
@@ -695,15 +755,17 @@ explain select * from v2 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t2",
"rows_for_plan": 10,
- "cost_for_plan": 4.021972656
+ "cost_for_plan": 0.01159965
}
]
},
{
- "best_join_order": ["t2"]
+ "best_join_order": ["t2"],
+ "rows": 10,
+ "cost": 0.01159965
},
{
"attaching_conditions_to_tables": {
@@ -711,10 +773,13 @@ explain select * from v2 {
"attached_conditions_summary": [
{
"table": "t2",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -786,7 +851,8 @@ explain select * from v1 {
"table": "t1",
"table_scan": {
"rows": 10,
- "cost": 2.021972656
+ "read_cost": 0.01127965,
+ "read_and_compare_cost": 0.01159965
}
}
]
@@ -794,24 +860,31 @@ explain select * from v1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "rows": 10,
+ "rows_after_filter": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
+ "index_only": false,
"chosen": true,
"use_tmp_table": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
"uses_join_buffering": false
}
}
@@ -819,16 +892,18 @@ explain select * from v1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 10,
- "cost_for_plan": 4.021972656,
- "cost_for_sorting": 10
+ "cost_for_plan": 0.01159965,
+ "cost_for_sorting": 0.006368384
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 10,
+ "cost": 0.017968034
},
{
"attaching_conditions_to_tables": {
@@ -836,10 +911,13 @@ explain select * from v1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -860,7 +938,8 @@ explain select * from v1 {
"table": "<derived2>",
"table_scan": {
"rows": 10,
- "cost": 10
+ "read_cost": 0.012414166,
+ "read_and_compare_cost": 0.012757506
}
}
]
@@ -868,23 +947,30 @@ explain select * from v1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "<derived2>",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 10,
+ "rows": 10,
+ "rows_after_filter": 10,
+ "rows_out": 10,
+ "cost": 0.012757506,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10,
- "cost": 10,
+ "rows_read": 10,
+ "rows_out": 10,
+ "cost": 0.012757506,
"uses_join_buffering": false
}
}
@@ -892,15 +978,17 @@ explain select * from v1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "<derived2>",
"rows_for_plan": 10,
- "cost_for_plan": 12
+ "cost_for_plan": 0.012757506
}
]
},
{
- "best_join_order": ["<derived2>"]
+ "best_join_order": ["<derived2>"],
+ "rows": 10,
+ "cost": 0.012757506
},
{
"attaching_conditions_to_tables": {
@@ -908,10 +996,13 @@ explain select * from v1 {
"attached_conditions_summary": [
{
"table": "<derived2>",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -1032,14 +1123,16 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
"table": "t1",
"table_scan": {
"rows": 100,
- "cost": 2.317382812
+ "read_cost": 0.0224761,
+ "read_and_compare_cost": 0.0256761
}
},
{
"table": "t2",
"table_scan": {
"rows": 100,
- "cost": 2.317382812
+ "read_cost": 0.0224761,
+ "read_and_compare_cost": 0.0256761
}
}
]
@@ -1047,23 +1140,30 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 100,
- "cost": 2.317382812,
+ "rows": 100,
+ "rows_after_filter": 100,
+ "rows_out": 100,
+ "cost": 0.0256761,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 100,
- "cost": 2.317382812,
+ "rows_read": 100,
+ "rows_out": 100,
+ "cost": 0.0256761,
"uses_join_buffering": false
}
}
@@ -1071,18 +1171,25 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
{
"best_access_path": {
"table": "t2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 100,
- "cost": 2.317382812,
+ "rows": 100,
+ "rows_after_filter": 100,
+ "rows_out": 100,
+ "cost": 0.0256761,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 100,
- "cost": 2.317382812,
+ "rows_read": 100,
+ "rows_out": 100,
+ "cost": 0.0256761,
"uses_join_buffering": false
}
}
@@ -1090,39 +1197,45 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 100,
- "cost_for_plan": 22.31738281,
+ "cost_for_plan": 0.0256761,
"rest_of_plan": [
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t2",
+ "plan_details": {
+ "record_count": 100
+ },
"considered_access_paths": [
{
"access_type": "ref",
"index": "a",
"used_range_estimates": false,
"reason": "not available",
- "rowid_filter_skipped": "cost_factor <= 0",
"rows": 1,
- "cost": 200.0585794,
+ "cost": 0.1821659,
"chosen": true
},
{
- "access_type": "scan",
- "resulting_rows": 100,
- "cost": 2.317382812,
+ "access_type": "scan_with_join_cache",
+ "rows": 100,
+ "rows_after_filter": 100,
+ "rows_out": 1,
+ "cost": 0.9604227,
+ "index_only": false,
"chosen": false
}
],
"chosen_access_method": {
"type": "ref",
- "records": 1,
- "cost": 200.0585794,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.1821659,
"uses_join_buffering": false
}
}
@@ -1130,47 +1243,53 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
]
},
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"table": "t2",
"rows_for_plan": 100,
- "cost_for_plan": 242.3759623
+ "cost_for_plan": 0.207842
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t2",
"rows_for_plan": 100,
- "cost_for_plan": 22.31738281,
+ "cost_for_plan": 0.0256761,
"rest_of_plan": [
{
- "plan_prefix": ["t2"],
+ "plan_prefix": "t2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 100
+ },
"considered_access_paths": [
{
"access_type": "ref",
"index": "a",
"used_range_estimates": false,
"reason": "not available",
- "rowid_filter_skipped": "cost_factor <= 0",
"rows": 1,
- "cost": 200.0585794,
+ "cost": 0.1821659,
"chosen": true
},
{
- "access_type": "scan",
- "resulting_rows": 100,
- "cost": 2.317382812,
+ "access_type": "scan_with_join_cache",
+ "rows": 100,
+ "rows_after_filter": 100,
+ "rows_out": 1,
+ "cost": 0.9604227,
+ "index_only": false,
"chosen": false
}
],
"chosen_access_method": {
"type": "ref",
- "records": 1,
- "cost": 200.0585794,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.1821659,
"uses_join_buffering": false
}
}
@@ -1178,20 +1297,22 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
]
},
{
- "plan_prefix": ["t2"],
+ "plan_prefix": "t2",
"table": "t1",
"rows_for_plan": 100,
- "cost_for_plan": 242.3759623,
+ "cost_for_plan": 0.207842,
"pruned_by_cost": true,
- "current_cost": 242.3759623,
- "best_cost": 242.3759623
+ "current_cost": 0.207842,
+ "best_cost": 0.207842
}
]
}
]
},
{
- "best_join_order": ["t1", "t2"]
+ "best_join_order": ["t1", "t2"],
+ "rows": 100,
+ "cost": 0.207842
},
{
"substitute_best_equal": {
@@ -1205,14 +1326,17 @@ explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.b is not null"
+ "attached_condition": "t1.b is not null"
},
{
"table": "t2",
- "attached": "t1.a = t2.b + 2"
+ "attached_condition": "t1.a = t2.b + 2"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -1230,10 +1354,11 @@ drop table t1,t2,t0;
# group_by min max optimization
#
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
+insert into t1 select seq, mod(seq,4)+1 from seq_1_to_65536;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
+test.t1 analyze status Table is already up to date
EXPLAIN SELECT DISTINCT a FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL a 4 NULL 5 Using index for group-by
@@ -1272,7 +1397,7 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
"range_analysis": {
"table_scan": {
"rows": 65536,
- "cost": 13255.2
+ "cost": 10.29477568
},
"potential_range_indexes": [
{
@@ -1288,9 +1413,8 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
],
"best_covering_index_scan": {
"index": "a",
- "cost": 13377.39141,
- "chosen": false,
- "cause": "cost"
+ "cost": 9.123706862,
+ "chosen": true
},
"group_index_range": {
"distinct_query": true,
@@ -1299,7 +1423,7 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
"index": "a",
"covering": true,
"rows": 5,
- "cost": 6.25
+ "cost": 0.004191135
}
]
},
@@ -1311,7 +1435,7 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
"max_aggregate": false,
"distinct_aggregate": false,
"rows": 5,
- "cost": 6.25,
+ "cost": 0.004191135,
"key_parts_used_for_access": ["a"],
"ranges": [],
"chosen": true
@@ -1325,12 +1449,12 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
"max_aggregate": false,
"distinct_aggregate": false,
"rows": 5,
- "cost": 6.25,
+ "cost": 0.004191135,
"key_parts_used_for_access": ["a"],
"ranges": []
},
"rows_for_plan": 5,
- "cost_for_plan": 6.25,
+ "cost_for_plan": 0.004191135,
"chosen": true
}
}
@@ -1340,23 +1464,29 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "index_merge",
- "resulting_rows": 5,
- "cost": 6.25,
+ "rows": 5,
+ "rows_after_filter": 5,
+ "rows_out": 5,
+ "cost": 0.004191135,
"chosen": true
}
],
"chosen_access_method": {
"type": "index_merge",
- "records": 5,
- "cost": 6.25,
+ "rows_read": 5,
+ "rows_out": 5,
+ "cost": 0.004191135,
"uses_join_buffering": false
}
}
@@ -1364,15 +1494,17 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 5,
- "cost_for_plan": 7.25
+ "cost_for_plan": 0.004191135
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 5,
+ "cost": 0.004191135
},
{
"attaching_conditions_to_tables": {
@@ -1380,10 +1512,13 @@ EXPLAIN SELECT DISTINCT a FROM t1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -1408,10 +1543,13 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 20 NULL 8 Using where; Using index for group-by
+1 SIMPLE t1 range NULL a 20 NULL 7 Using where; Using index for group-by
+set statement optimizer_scan_setup_cost=0 for EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL a 20 NULL 7 Using where; Using index
select * from information_schema.OPTIMIZER_TRACE;
QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
-EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
+set statement optimizer_scan_setup_cost=0 for EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"steps": [
{
"join_preparation": {
@@ -1467,7 +1605,7 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"range_analysis": {
"table_scan": {
"rows": 7,
- "cost": 5.429052734
+ "cost": 0.001130435
},
"potential_range_indexes": [
{
@@ -1478,8 +1616,9 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
],
"best_covering_index_scan": {
"index": "a",
- "cost": 2.409226263,
- "chosen": true
+ "cost": 0.001758432,
+ "chosen": false,
+ "cause": "cost"
},
"setup_range_conditions": [],
"analyzing_range_alternatives": {
@@ -1495,8 +1634,8 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"index": "a",
"covering": true,
"ranges": ["(2,3) <= (b,c) <= (2,3)"],
- "rows": 8,
- "cost": 2.2
+ "rows": 7,
+ "cost": 0.004425189
}
]
},
@@ -1507,54 +1646,61 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"min_aggregate": true,
"max_aggregate": false,
"distinct_aggregate": false,
- "rows": 8,
- "cost": 2.2,
+ "rows": 7,
+ "cost": 0.004425189,
"key_parts_used_for_access": ["a", "b", "c"],
"ranges": ["(2,3) <= (b,c) <= (2,3)"],
- "chosen": true
- },
- "chosen_range_access_summary": {
- "range_access_plan": {
- "type": "index_group",
- "index": "a",
- "min_max_arg": "d",
- "min_aggregate": true,
- "max_aggregate": false,
- "distinct_aggregate": false,
- "rows": 8,
- "cost": 2.2,
- "key_parts_used_for_access": ["a", "b", "c"],
- "ranges": ["(2,3) <= (b,c) <= (2,3)"]
- },
- "rows_for_plan": 8,
- "cost_for_plan": 2.2,
- "chosen": true
+ "chosen": false,
+ "cause": "cost"
}
}
+ },
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [
+ {
+ "column_name": "b",
+ "ranges": ["2 <= b <= 2"],
+ "selectivity_from_histogram": 0.285714286
+ },
+ {
+ "column_name": "c",
+ "ranges": ["3 <= c <= 3"],
+ "selectivity_from_histogram": 0.285714286
+ }
+ ],
+ "cond_selectivity": 0.081632653
}
]
},
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
- "access_type": "index_merge",
- "resulting_rows": 8,
- "cost": 2.2,
+ "access_type": "scan",
+ "rows": 7,
+ "rows_after_filter": 1,
+ "rows_out": 0.571428573,
+ "cost": 0.001758432,
+ "index_only": true,
"chosen": true,
"use_tmp_table": true
}
],
"chosen_access_method": {
- "type": "index_merge",
- "records": 8,
- "cost": 2.2,
+ "type": "scan",
+ "rows_read": 1,
+ "rows_out": 0.571428573,
+ "cost": 0.001758432,
"uses_join_buffering": false
}
}
@@ -1562,16 +1708,21 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
- "rows_for_plan": 8,
- "cost_for_plan": 3.8,
- "cost_for_sorting": 8
+ "rows_for_plan": 0.571428573,
+ "cost_for_plan": 0.001758432,
+ "pushdown_cond_selectivity": 0.571428573,
+ "filtered": 8.163265322,
+ "rows_out": 0.571428573,
+ "cost_for_sorting": 3.585611e-4
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 0.571428573,
+ "cost": 0.002116993
},
{
"substitute_best_equal": {
@@ -1585,7 +1736,32 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.b = 2 and t1.c = 3"
+ "attached_condition": "t1.b = 2 and t1.c = 3"
+ }
+ ]
+ }
+ },
+ {
+ "make_join_readinfo": []
+ },
+ {
+ "reconsidering_access_paths_for_index_ordering": {
+ "clause": "GROUP BY",
+ "table": "t1",
+ "rows_estimation": 1,
+ "filesort_cost": 4.579083e-5,
+ "read_cost": 0.001804223,
+ "filesort_type": "priority_queue with addon fields",
+ "fanout": 1,
+ "possible_keys": [
+ {
+ "index": "a",
+ "can_resolve_order": true,
+ "direction": 1,
+ "rows_to_examine": 7,
+ "range_scan": false,
+ "scan_cost": 0.001758432,
+ "chosen": true
}
]
}
@@ -1673,7 +1849,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"range_analysis": {
"table_scan": {
"rows": 16,
- "cost": 7.23125
+ "cost": 0.01253808
},
"potential_range_indexes": [
{
@@ -1684,7 +1860,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
],
"best_covering_index_scan": {
"index": "id",
- "cost": 4.21171589,
+ "cost": 0.008002862,
"chosen": true
},
"setup_range_conditions": [],
@@ -1702,7 +1878,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"covering": true,
"ranges": ["(2001-01-04) <= (a)"],
"rows": 9,
- "cost": 2.35
+ "cost": 0.005620843
}
]
},
@@ -1714,7 +1890,7 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"max_aggregate": true,
"distinct_aggregate": false,
"rows": 9,
- "cost": 2.35,
+ "cost": 0.005620843,
"key_parts_used_for_access": ["id"],
"ranges": ["(2001-01-04) <= (a)"],
"chosen": true
@@ -1728,12 +1904,12 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"max_aggregate": true,
"distinct_aggregate": false,
"rows": 9,
- "cost": 2.35,
+ "cost": 0.005620843,
"key_parts_used_for_access": ["id"],
"ranges": ["(2001-01-04) <= (a)"]
},
"rows_for_plan": 9,
- "cost_for_plan": 2.35,
+ "cost_for_plan": 0.005620843,
"chosen": true
}
}
@@ -1743,24 +1919,30 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "index_merge",
- "resulting_rows": 9,
- "cost": 2.35,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.005620843,
"chosen": true,
"use_tmp_table": true
}
],
"chosen_access_method": {
"type": "index_merge",
- "records": 9,
- "cost": 2.35,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.005620843,
"uses_join_buffering": false
}
}
@@ -1768,16 +1950,18 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 9,
- "cost_for_plan": 4.15,
- "cost_for_sorting": 9
+ "cost_for_plan": 0.005620843,
+ "cost_for_sorting": 0.005728198
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 9,
+ "cost": 0.011349041
},
{
"substitute_best_equal": {
@@ -1791,10 +1975,13 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.a >= 20010104e0"
+ "attached_condition": "t1.a >= 20010104e0"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -1868,7 +2055,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"range_analysis": {
"table_scan": {
"rows": 16,
- "cost": 7.23125
+ "cost": 0.01253808
},
"potential_range_indexes": [
{
@@ -1879,7 +2066,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
],
"best_covering_index_scan": {
"index": "id",
- "cost": 4.21171589,
+ "cost": 0.008002862,
"chosen": true
},
"setup_range_conditions": [],
@@ -1897,7 +2084,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"covering": true,
"ranges": ["(2001-01-04) <= (a) <= (2001-01-04)"],
"rows": 9,
- "cost": 2.35
+ "cost": 0.005620843
}
]
},
@@ -1909,7 +2096,7 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"max_aggregate": false,
"distinct_aggregate": false,
"rows": 9,
- "cost": 2.35,
+ "cost": 0.005620843,
"key_parts_used_for_access": ["id", "a"],
"ranges": ["(2001-01-04) <= (a) <= (2001-01-04)"],
"chosen": true
@@ -1923,12 +2110,12 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"max_aggregate": false,
"distinct_aggregate": false,
"rows": 9,
- "cost": 2.35,
+ "cost": 0.005620843,
"key_parts_used_for_access": ["id", "a"],
"ranges": ["(2001-01-04) <= (a) <= (2001-01-04)"]
},
"rows_for_plan": 9,
- "cost_for_plan": 2.35,
+ "cost_for_plan": 0.005620843,
"chosen": true
}
}
@@ -1938,24 +2125,30 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "index_merge",
- "resulting_rows": 9,
- "cost": 2.35,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.005620843,
"chosen": true,
"use_tmp_table": true
}
],
"chosen_access_method": {
"type": "index_merge",
- "records": 9,
- "cost": 2.35,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.005620843,
"uses_join_buffering": false
}
}
@@ -1963,16 +2156,18 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 9,
- "cost_for_plan": 4.15,
- "cost_for_sorting": 9
+ "cost_for_plan": 0.005620843,
+ "cost_for_sorting": 0.005728198
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 9,
+ "cost": 0.011349041
},
{
"substitute_best_equal": {
@@ -1986,10 +2181,13 @@ EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.a = 20010104e0"
+ "attached_condition": "t1.a = 20010104e0"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -2006,28 +2204,27 @@ drop table t1;
#
# Late ORDER BY optimization
#
-create table ten(a int);
-insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table one_k(a int primary key);
-insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
create table t1 (
pk int not null,
a int,
b int,
c int,
filler char(100),
-KEY a_a(c),
+KEY c(c),
KEY a_c(a,c),
KEY a_b(a,b)
);
-insert into t1
-select a, a,a,a, 'filler-dataaa' from test.one_k;
+insert into t1 select seq, seq,seq,seq, 'filler-dataaa' from seq_0_to_999;
update t1 set a=1 where pk between 0 and 180;
update t1 set b=2 where pk between 0 and 20;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+explain select * from t1 where a=1 and b=2 order by c limit 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a_c,a_b a_b 10 const,const 21 Using where; Using filesort
+update t1 set b=2 where pk between 20 and 40;
set optimizer_trace='enabled=on';
explain select * from t1 where a=1 and b=2 order by c limit 1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -2112,11 +2309,11 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"range_analysis": {
"table_scan": {
"rows": 1000,
- "cost": 232.5644531
+ "cost": 0.1731718
},
"potential_range_indexes": [
{
- "index": "a_a",
+ "index": "c",
"usable": false,
"cause": "not applicable"
},
@@ -2141,8 +2338,9 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"using_mrr": false,
"index_only": false,
"rows": 180,
- "cost": 216.2943776,
- "chosen": true
+ "cost": 0.223677504,
+ "chosen": false,
+ "cause": "cost"
},
{
"index": "a_b",
@@ -2150,8 +2348,8 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"rowid_ordered": true,
"using_mrr": false,
"index_only": false,
- "rows": 21,
- "cost": 25.36242739,
+ "rows": 41,
+ "cost": 0.051929313,
"chosen": true
}
],
@@ -2168,11 +2366,11 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"range_access_plan": {
"type": "range_scan",
"index": "a_b",
- "rows": 21,
+ "rows": 41,
"ranges": ["(1,2) <= (a,b) <= (1,2)"]
},
- "rows_for_plan": 21,
- "cost_for_plan": 25.36242739,
+ "rows_for_plan": 41,
+ "cost_for_plan": 0.051929313,
"chosen": true
}
}
@@ -2182,12 +2380,12 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"rowid_filters": [
{
"key": "a_b",
- "build_cost": 0.886777098,
- "rows": 21
+ "build_cost": 0.005839142,
+ "rows": 41
},
{
"key": "a_c",
- "build_cost": 10.52169992,
+ "build_cost": 0.024214742,
"rows": 180
}
]
@@ -2196,7 +2394,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"selectivity_for_indexes": [
{
"index_name": "a_b",
- "selectivity_from_index": 0.021
+ "selectivity_from_index": 0.041
}
],
"selectivity_for_columns": [
@@ -2211,34 +2409,36 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"selectivity_from_histogram": 0.021
}
],
- "cond_selectivity": 0.021
+ "cond_selectivity": 0.041
}
]
},
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "ref",
"index": "a_c",
"used_range_estimates": true,
- "rowid_filter_skipped": "worst/max seeks clipping",
"rows": 180,
- "cost": 180.2743776,
+ "cost": 0.222922562,
"chosen": true
},
{
"access_type": "ref",
"index": "a_b",
"used_range_estimates": true,
- "rows": 21,
- "cost": 21.14242739,
+ "rows": 41,
+ "cost": 0.051379171,
"chosen": true
},
{
@@ -2249,8 +2449,9 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
],
"chosen_access_method": {
"type": "ref",
- "records": 21,
- "cost": 21.14242739,
+ "rows_read": 41,
+ "rows_out": 41,
+ "cost": 0.051379171,
"uses_join_buffering": false
}
}
@@ -2258,15 +2459,17 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
- "rows_for_plan": 21,
- "cost_for_plan": 25.34242739
+ "rows_for_plan": 41,
+ "cost_for_plan": 0.051379171
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 41,
+ "cost": 0.051379171
},
{
"substitute_best_equal": {
@@ -2280,36 +2483,40 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
},
{
+ "make_join_readinfo": []
+ },
+ {
"reconsidering_access_paths_for_index_ordering": {
"clause": "ORDER BY",
- "fanout": 1,
- "read_time": 21.14342739,
"table": "t1",
- "rows_estimation": 21,
+ "rows_estimation": 41,
+ "filesort_cost": 9.387121e-4,
+ "read_cost": 0.052317883,
+ "filesort_type": "priority_queue with addon fields",
+ "fanout": 1,
"possible_keys": [
{
- "index": "a_a",
+ "index": "c",
"can_resolve_order": true,
"direction": 1,
- "updated_limit": 47,
- "index_scan_time": 47,
- "usable": false,
- "cause": "cost"
+ "rows_to_examine": 24,
+ "range_scan": false,
+ "scan_cost": 0.030403398,
+ "chosen": true
},
{
"index": "a_c",
"can_resolve_order": true,
"direction": 1,
- "updated_limit": 47,
- "range_scan_time": 4.331020747,
- "index_scan_time": 4.331020747,
- "records": 180,
+ "rows_to_examine": 4.390243902,
+ "range_scan": true,
+ "scan_cost": 0.023415994,
"chosen": true
},
{
@@ -2323,13 +2530,9 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
{
"table": "t1",
"range_analysis": {
- "table_scan": {
- "rows": 1000,
- "cost": 1.79769e308
- },
"potential_range_indexes": [
{
- "index": "a_a",
+ "index": "c",
"usable": false,
"cause": "not applicable"
},
@@ -2354,7 +2557,8 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"using_mrr": false,
"index_only": false,
"rows": 180,
- "cost": 216.2943776,
+ "cost": 0.223677504,
+ "cost_with_limit": 0.002574553,
"chosen": true
}
],
@@ -2375,7 +2579,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
"ranges": ["(1) <= (a) <= (1)"]
},
"rows_for_plan": 180,
- "cost_for_plan": 216.2943776,
+ "cost_for_plan": 0.223677504,
"chosen": true
}
}
@@ -2391,7 +2595,7 @@ explain select * from t1 where a=1 and b=2 order by c limit 1 {
}
]
} 0 0
-drop table t1,ten,one_k;
+drop table t1;
#
# TABLE ELIMINATION
#
@@ -2482,7 +2686,8 @@ select t1.a from t1 left join t2 on t1.a=t2.a {
"table": "t1",
"table_scan": {
"rows": 4,
- "cost": 2.006835938
+ "read_cost": 0.01053322,
+ "read_and_compare_cost": 0.01066122
}
},
{
@@ -2496,23 +2701,30 @@ select t1.a from t1 left join t2 on t1.a=t2.a {
{
"considered_execution_plans": [
{
- "plan_prefix": ["t2"],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 4,
- "cost": 2.006835938,
+ "rows": 4,
+ "rows_after_filter": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 4,
- "cost": 2.006835938,
+ "rows_read": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
"uses_join_buffering": false
}
}
@@ -2520,15 +2732,17 @@ select t1.a from t1 left join t2 on t1.a=t2.a {
]
},
{
- "plan_prefix": ["t2"],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 4,
- "cost_for_plan": 2.806835937
+ "cost_for_plan": 0.01066122
}
]
},
{
- "best_join_order": ["t2", "t1"]
+ "best_join_order": ["t2", "t1"],
+ "rows": 4,
+ "cost": 0.01066122
},
{
"substitute_best_equal": {
@@ -2537,19 +2751,23 @@ select t1.a from t1 left join t2 on t1.a=t2.a {
}
},
{
- "condition_on_constant_tables": "1",
- "computing_condition": []
- },
- {
"attaching_conditions_to_tables": {
- "attached_conditions_computation": [],
+ "attached_conditions_computation": [
+ {
+ "condition_on_constant_tables": "1",
+ "computing_condition": []
+ }
+ ],
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -2628,14 +2846,16 @@ explain select * from t1 left join t2 on t2.a=t1.a {
"table": "t1",
"table_scan": {
"rows": 4,
- "cost": 2.006835938
+ "read_cost": 0.01053322,
+ "read_and_compare_cost": 0.01066122
}
},
{
"table": "t2",
"table_scan": {
"rows": 2,
- "cost": 2.004394531
+ "read_cost": 0.01028441,
+ "read_and_compare_cost": 0.01034841
}
}
]
@@ -2643,23 +2863,30 @@ explain select * from t1 left join t2 on t2.a=t1.a {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 4,
- "cost": 2.006835938,
+ "rows": 4,
+ "rows_after_filter": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 4,
- "cost": 2.006835938,
+ "rows_read": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
"uses_join_buffering": false
}
}
@@ -2667,36 +2894,39 @@ explain select * from t1 left join t2 on t2.a=t1.a {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 4,
- "cost_for_plan": 2.806835937,
+ "cost_for_plan": 0.01066122,
"rest_of_plan": [
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t2",
+ "plan_details": {
+ "record_count": 4
+ },
"considered_access_paths": [
{
"access_type": "eq_ref",
"index": "PRIMARY",
"rows": 1,
- "cost": 4,
+ "cost": 0.007120904,
"chosen": true
},
{
- "access_type": "scan",
- "resulting_rows": 2,
- "cost": 8.017578125,
- "chosen": false
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
}
],
"chosen_access_method": {
"type": "eq_ref",
- "records": 1,
- "cost": 4,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.007120904,
"uses_join_buffering": false
}
}
@@ -2704,17 +2934,19 @@ explain select * from t1 left join t2 on t2.a=t1.a {
]
},
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"table": "t2",
"rows_for_plan": 4,
- "cost_for_plan": 7.606835937
+ "cost_for_plan": 0.017782124
}
]
}
]
},
{
- "best_join_order": ["t1", "t2"]
+ "best_join_order": ["t1", "t2"],
+ "rows": 4,
+ "cost": 0.017782124
},
{
"substitute_best_equal": {
@@ -2730,23 +2962,27 @@ explain select * from t1 left join t2 on t2.a=t1.a {
}
},
{
- "condition_on_constant_tables": "1",
- "computing_condition": []
- },
- {
"attaching_conditions_to_tables": {
- "attached_conditions_computation": [],
+ "attached_conditions_computation": [
+ {
+ "condition_on_constant_tables": "1",
+ "computing_condition": []
+ }
+ ],
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
},
{
"table": "t2",
- "attached": "trigcond(trigcond(t1.a is not null))"
+ "attached_condition": "trigcond(trigcond(t1.a is not null))"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -2851,7 +3087,8 @@ explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and
"table": "t1",
"table_scan": {
"rows": 4,
- "cost": 2.006835938
+ "read_cost": 0.01053322,
+ "read_and_compare_cost": 0.01066122
}
},
{
@@ -2871,23 +3108,30 @@ explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and
{
"considered_execution_plans": [
{
- "plan_prefix": ["t3", "t2"],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 4,
- "cost": 2.006835938,
+ "rows": 4,
+ "rows_after_filter": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 4,
- "cost": 2.006835938,
+ "rows_read": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
"uses_join_buffering": false
}
}
@@ -2895,15 +3139,17 @@ explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and
]
},
{
- "plan_prefix": ["t3", "t2"],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 4,
- "cost_for_plan": 2.806835937
+ "cost_for_plan": 0.01066122
}
]
},
{
- "best_join_order": ["t3", "t2", "t1"]
+ "best_join_order": ["t3", "t2", "t1"],
+ "rows": 4,
+ "cost": 0.01066122
},
{
"substitute_best_equal": {
@@ -2912,19 +3158,23 @@ explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and
}
},
{
- "condition_on_constant_tables": "1",
- "computing_condition": []
- },
- {
"attaching_conditions_to_tables": {
- "attached_conditions_computation": [],
+ "attached_conditions_computation": [
+ {
+ "condition_on_constant_tables": "1",
+ "computing_condition": []
+ }
+ ],
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -2941,33 +3191,27 @@ drop table t0, t1, t2, t3;
#
# IN subquery to sem-join is traced
#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1(a int, b int);
-insert into t1 values (0,0),(1,1),(2,2);
-create table t2 as select * from t1;
-create table t11(a int, b int);
-create table t10 (pk int, a int);
-insert into t10 select a,a from t0;
-create table t12 like t10;
-insert into t12 select * from t10;
-analyze table t1,t10;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
-test.t10 analyze status Engine-independent statistics collected
-test.t10 analyze status OK
+insert into t1 select seq,seq from seq_0_to_3;
+create table t2 (p int, a int);
+insert into t2 select seq,seq from seq_1_to_10;
set optimizer_trace='enabled=on';
-explain extended select * from t1 where a in (select pk from t10);
+explain extended select * from t1 where a in (select p from t2);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t10 ALL NULL NULL NULL NULL 10 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 10 10.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join (`test`.`t10`) where 1
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`p` = `test`.`t1`.`a`
+insert into t2 select seq,seq from seq_10_to_100;
+explain extended select * from t1 where a in (select p from t2);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 101 0.99 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`p` = `test`.`t1`.`a`
select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
-explain extended select * from t1 where a in (select pk from t10) {
+explain extended select * from t1 where a in (select p from t2) {
"steps": [
{
"join_preparation": {
@@ -2986,13 +3230,13 @@ explain extended select * from t1 where a in (select pk from t10) {
}
},
{
- "expanded_query": "/* select#2 */ select t10.pk from t10"
+ "expanded_query": "/* select#2 */ select t2.p from t2"
}
]
}
},
{
- "expanded_query": "/* select#1 */ select t1.a AS a,t1.b AS b from t1 where t1.a in (/* select#2 */ select t10.pk from t10)"
+ "expanded_query": "/* select#1 */ select t1.a AS a,t1.b AS b from t1 where t1.a in (/* select#2 */ select t2.p from t2)"
}
]
}
@@ -3021,19 +3265,19 @@ explain extended select * from t1 where a in (select pk from t10) {
{
"condition_processing": {
"condition": "WHERE",
- "original_condition": "1 and t1.a = t10.pk",
+ "original_condition": "1 and t1.a = t2.p",
"steps": [
{
"transformation": "equality_propagation",
- "resulting_condition": "1 and multiple equal(t1.a, t10.pk)"
+ "resulting_condition": "1 and multiple equal(t1.a, t2.p)"
},
{
"transformation": "constant_propagation",
- "resulting_condition": "1 and multiple equal(t1.a, t10.pk)"
+ "resulting_condition": "1 and multiple equal(t1.a, t2.p)"
},
{
"transformation": "trivial_condition_removal",
- "resulting_condition": "multiple equal(t1.a, t10.pk)"
+ "resulting_condition": "multiple equal(t1.a, t2.p)"
}
]
}
@@ -3047,7 +3291,7 @@ explain extended select * from t1 where a in (select pk from t10) {
"depends_on_map_bits": []
},
{
- "table": "t10",
+ "table": "t2",
"row_may_be_null": false,
"map_bit": 1,
"depends_on_map_bits": []
@@ -3062,15 +3306,17 @@ explain extended select * from t1 where a in (select pk from t10) {
{
"table": "t1",
"table_scan": {
- "rows": 3,
- "cost": 2.006591797
+ "rows": 4,
+ "read_cost": 0.01053322,
+ "read_and_compare_cost": 0.01066122
}
},
{
- "table": "t10",
+ "table": "t2",
"table_scan": {
- "rows": 10,
- "cost": 2.021972656
+ "rows": 101,
+ "read_cost": 0.022600505,
+ "read_and_compare_cost": 0.025832505
}
}
]
@@ -3086,23 +3332,30 @@ explain extended select * from t1 where a in (select pk from t10) {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
- "table": "t10",
+ "table": "t2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "rows": 101,
+ "rows_after_filter": 101,
+ "rows_out": 101,
+ "cost": 0.025832505,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 101,
+ "rows_out": 101,
+ "cost": 0.025832505,
"uses_join_buffering": false
}
}
@@ -3110,10 +3363,10 @@ explain extended select * from t1 where a in (select pk from t10) {
]
},
{
- "plan_prefix": [],
- "table": "t10",
- "rows_for_plan": 10,
- "cost_for_plan": 4.021972656
+ "plan_prefix": "",
+ "table": "t2",
+ "rows_for_plan": 101,
+ "cost_for_plan": 0.025832505
}
]
}
@@ -3123,42 +3376,56 @@ explain extended select * from t1 where a in (select pk from t10) {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.006591797,
+ "rows": 4,
+ "rows_after_filter": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.006591797,
+ "rows_read": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
"uses_join_buffering": false
}
}
},
{
"best_access_path": {
- "table": "t10",
+ "table": "t2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "rows": 101,
+ "rows_after_filter": 101,
+ "rows_out": 101,
+ "cost": 0.025832505,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 101,
+ "rows_out": 101,
+ "cost": 0.025832505,
"uses_join_buffering": false
}
}
@@ -3166,30 +3433,37 @@ explain extended select * from t1 where a in (select pk from t10) {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
- "rows_for_plan": 3,
- "cost_for_plan": 2.606591797,
+ "rows_for_plan": 4,
+ "cost_for_plan": 0.01066122,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"get_costs_for_tables": [
{
"best_access_path": {
- "table": "t10",
+ "table": "t2",
+ "plan_details": {
+ "record_count": 4
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "access_type": "scan_with_join_cache",
+ "rows": 101,
+ "rows_after_filter": 101,
+ "rows_out": 101,
+ "cost": 0.063593833,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 101,
+ "rows_out": 101,
+ "cost": 0.063593833,
"uses_join_buffering": true
}
}
@@ -3197,38 +3471,47 @@ explain extended select * from t1 where a in (select pk from t10) {
]
},
{
- "plan_prefix": ["t1"],
- "table": "t10",
- "rows_for_plan": 30,
- "cost_for_plan": 10.62856445,
+ "plan_prefix": "t1",
+ "table": "t2",
+ "rows_for_plan": 404,
+ "cost_for_plan": 0.074255053,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 3,
- "read_time": 10.62856445
+ "rows": 4,
+ "cost": 0.074255053
},
{
"strategy": "SJ-Materialization",
- "records": 3,
- "read_time": 5.278564453
+ "rows": 4,
+ "cost": 0.078768645
},
{
"strategy": "DuplicateWeedout",
- "records": 3,
- "read_time": 27.12856445
+ "prefix_row_count": 4,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 101,
+ "rows": 4,
+ "dups_cost": 0.074255053,
+ "write_cost": 0.02564388,
+ "full_lookup_cost": 0.06503188,
+ "total_cost": 0.164930813
},
{
- "chosen_strategy": "SJ-Materialization"
+ "chosen_strategy": "FirstMatch"
}
- ]
+ ],
+ "sj_rows_out": 1,
+ "sj_rows_for_plan": 4,
+ "sj_filtered": 0.99009901
}
]
},
{
- "plan_prefix": [],
- "table": "t10",
- "rows_for_plan": 10,
- "cost_for_plan": 4.021972656,
+ "plan_prefix": "",
+ "table": "t2",
+ "rows_for_plan": 101,
+ "cost_for_plan": 0.025832505,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
@@ -3237,46 +3520,43 @@ explain extended select * from t1 where a in (select pk from t10) {
{
"fix_semijoin_strategies_for_picked_join_order": [
{
- "semi_join_strategy": "SJ-Materialization",
+ "semi_join_strategy": "FirstMatch",
"join_order": [
{
- "table": "t10"
+ "table": "t2"
}
]
}
]
},
{
- "best_join_order": ["t1", "<subquery2>"]
+ "best_join_order": ["t1", "t2"],
+ "rows": 4,
+ "cost": 0.074255053
},
{
"substitute_best_equal": {
"condition": "WHERE",
- "resulting_condition": "1"
+ "resulting_condition": "t2.p = t1.a"
}
},
{
- "condition_on_constant_tables": "1",
- "computing_condition": []
- },
- {
"attaching_conditions_to_tables": {
"attached_conditions_computation": [],
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
},
{
- "table": "t10",
- "attached": null
- },
- {
- "table": "<subquery2>",
- "attached": null
+ "table": "t2",
+ "attached_condition": "t2.p = t1.a"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -3289,12 +3569,10 @@ explain extended select * from t1 where a in (select pk from t10) {
}
]
} 0 0
-drop table t0,t1,t11,t10,t12,t2;
+drop table t1,t2;
#
# Selectivities for columns and indexes.
#
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (
pk int,
a int,
@@ -3302,7 +3580,7 @@ b int,
key pk(pk),
key pk_a(pk,a),
key pk_a_b(pk,a,b));
-insert into t1 select a,a,a from t0;
+insert into t1 select seq,seq,seq from seq_0_to_9;
ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a,b) INDEXES ();
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
@@ -3416,7 +3694,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"range_analysis": {
"table_scan": {
"rows": 10,
- "cost": 6.031738281
+ "cost": 0.01159965
},
"potential_range_indexes": [
{
@@ -3437,7 +3715,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
],
"best_covering_index_scan": {
"index": "pk_a_b",
- "cost": 3.010739566,
+ "cost": 0.007173242,
"chosen": true
},
"setup_range_conditions": [],
@@ -3450,7 +3728,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345585794,
+ "cost": 0.002574553,
"chosen": true
},
{
@@ -3460,7 +3738,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345829876,
+ "cost": 0.002574553,
"chosen": false,
"cause": "cost"
},
@@ -3471,7 +3749,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"using_mrr": false,
"index_only": true,
"rows": 1,
- "cost": 0.346073957,
+ "cost": 0.001478954,
"chosen": true
}
],
@@ -3479,10 +3757,10 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"intersecting_indexes": [
{
"index": "pk",
- "index_scan_cost": 1.000585794,
- "cumulated_index_scan_cost": 1.000585794,
- "disk_sweep_cost": 0.90078125,
- "cumulative_total_cost": 1.901367044,
+ "index_scan_cost": 0.000806227,
+ "cumulated_index_scan_cost": 0.000806227,
+ "disk_sweep_cost": 0.001143284,
+ "cumulative_total_cost": 0.001949511,
"usable": true,
"matching_rows_now": 1,
"intersect_covering_with_this_index": false,
@@ -3520,7 +3798,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"ranges": ["(2,5,1) <= (pk,a,b) <= (2,5,1)"]
},
"rows_for_plan": 1,
- "cost_for_plan": 0.346073957,
+ "cost_for_plan": 0.001478954,
"chosen": true
}
}
@@ -3530,17 +3808,17 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"rowid_filters": [
{
"key": "pk",
- "build_cost": 0.130585794,
+ "build_cost": 0.000899465,
"rows": 1
},
{
"key": "pk_a",
- "build_cost": 0.130829876,
+ "build_cost": 0.000899465,
"rows": 1
},
{
"key": "pk_a_b",
- "build_cost": 0.131073957,
+ "build_cost": 0.000899465,
"rows": 1
}
]
@@ -3571,18 +3849,21 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "ref",
"index": "pk",
"used_range_estimates": true,
"rows": 1,
- "cost": 1.125585794,
+ "cost": 0.002024411,
"chosen": true
},
{
@@ -3590,7 +3871,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"index": "pk_a",
"used_range_estimates": true,
"rows": 1,
- "cost": 1.125829876,
+ "cost": 0.002024411,
"chosen": false,
"cause": "cost"
},
@@ -3599,7 +3880,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"index": "pk_a_b",
"used_range_estimates": true,
"rows": 1,
- "cost": 0.126073957,
+ "cost": 0.000928812,
"chosen": true
},
{
@@ -3610,8 +3891,9 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
],
"chosen_access_method": {
"type": "ref",
- "records": 1,
- "cost": 0.126073957,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.000928812,
"uses_join_buffering": false
}
}
@@ -3619,15 +3901,17 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 1,
- "cost_for_plan": 0.326073957
+ "cost_for_plan": 0.000928812
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 1,
+ "cost": 0.000928812
},
{
"substitute_best_equal": {
@@ -3641,10 +3925,13 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -3659,7 +3946,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1 {
} 0 0
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set @@use_stat_tables= @save_use_stat_tables;
-drop table t0,t1;
+drop table t1;
set optimizer_trace="enabled=off";
#
# Tests added to show that sub-statements are not traced
@@ -3680,8 +3967,6 @@ declare a int default 0;
select count(*) from t2 into a;
return a;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set optimizer_trace='enabled=on';
select f1(a) from t1;
f1(a)
@@ -3723,7 +4008,8 @@ select f1(a) from t1 {
"table": "t1",
"table_scan": {
"rows": 4,
- "cost": 2.006835938
+ "read_cost": 0.01053322,
+ "read_and_compare_cost": 0.01066122
}
}
]
@@ -3731,23 +4017,30 @@ select f1(a) from t1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 4,
- "cost": 2.006835938,
+ "rows": 4,
+ "rows_after_filter": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 4,
- "cost": 2.006835938,
+ "rows_read": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
"uses_join_buffering": false
}
}
@@ -3755,15 +4048,17 @@ select f1(a) from t1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 4,
- "cost_for_plan": 2.806835937
+ "cost_for_plan": 0.01066122
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 4,
+ "cost": 0.01066122
},
{
"attaching_conditions_to_tables": {
@@ -3771,10 +4066,13 @@ select f1(a) from t1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -3827,7 +4125,8 @@ select f2(a) from t1 {
"table": "t1",
"table_scan": {
"rows": 4,
- "cost": 2.006835938
+ "read_cost": 0.01053322,
+ "read_and_compare_cost": 0.01066122
}
}
]
@@ -3835,23 +4134,30 @@ select f2(a) from t1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 4,
- "cost": 2.006835938,
+ "rows": 4,
+ "rows_after_filter": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 4,
- "cost": 2.006835938,
+ "rows_read": 4,
+ "rows_out": 4,
+ "cost": 0.01066122,
"uses_join_buffering": false
}
}
@@ -3859,15 +4165,17 @@ select f2(a) from t1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 4,
- "cost_for_plan": 2.806835937
+ "cost_for_plan": 0.01066122
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 4,
+ "cost": 0.01066122
},
{
"attaching_conditions_to_tables": {
@@ -3875,10 +4183,13 @@ select f2(a) from t1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -3908,7 +4219,7 @@ a
2
select length(trace) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
length(trace)
-2360
+2819
set optimizer_trace_max_mem_size=100;
select * from t1;
a
@@ -3922,7 +4233,7 @@ select * from t1 {
"join_preparation": {
"select_id": 1,
"steps": [
- 2260 0
+ 2719 0
set optimizer_trace_max_mem_size=0;
select * from t1;
a
@@ -3930,7 +4241,7 @@ a
2
select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
-select * from t1 2360 0
+select * from t1 2819 0
drop table t1;
set optimizer_trace='enabled=off';
set @@optimizer_trace_max_mem_size= @save_optimizer_trace_max_mem_size;
@@ -3955,7 +4266,7 @@ explain delete from t0 where t0.a<3 {
"range_analysis": {
"table_scan": {
"rows": 10,
- "cost": 6.021972656
+ "cost": 0.01159965
},
"potential_range_indexes": [
{
@@ -3974,7 +4285,7 @@ explain delete from t0 where t0.a<3 {
"using_mrr": false,
"index_only": false,
"rows": 3,
- "cost": 3.746757383,
+ "cost": 0.005042291,
"chosen": true
}
],
@@ -3992,7 +4303,7 @@ explain delete from t0 where t0.a<3 {
"ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
- "cost_for_plan": 3.746757383,
+ "cost_for_plan": 0.005042291,
"chosen": true
}
}
@@ -4095,7 +4406,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"range_analysis": {
"table_scan": {
"rows": 10,
- "cost": 6.021972656
+ "cost": 0.01159965
},
"potential_range_indexes": [
{
@@ -4106,7 +4417,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"best_covering_index_scan": {
"index": "a",
- "cost": 3.005857945,
+ "cost": 0.007173242,
"chosen": true
},
"setup_range_conditions": [],
@@ -4119,7 +4430,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"using_mrr": false,
"index_only": true,
"rows": 3,
- "cost": 0.746757383,
+ "cost": 0.001755494,
"chosen": true
}
],
@@ -4140,7 +4451,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
- "cost_for_plan": 0.746757383,
+ "cost_for_plan": 0.001755494,
"chosen": true
}
}
@@ -4160,7 +4471,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"range_analysis": {
"table_scan": {
"rows": 10,
- "cost": 6.021972656
+ "cost": 0.01159965
},
"potential_range_indexes": [
{
@@ -4171,7 +4482,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"best_covering_index_scan": {
"index": "a",
- "cost": 3.005857945,
+ "cost": 0.007173242,
"chosen": true
},
"setup_range_conditions": [],
@@ -4184,7 +4495,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"using_mrr": false,
"index_only": true,
"rows": 3,
- "cost": 0.746757383,
+ "cost": 0.001755494,
"chosen": true
}
],
@@ -4205,7 +4516,7 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"ranges": ["(NULL) < (a) < (3)"]
},
"rows_for_plan": 3,
- "cost_for_plan": 0.746757383,
+ "cost_for_plan": 0.001755494,
"chosen": true
}
}
@@ -4225,23 +4536,30 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t0",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "range",
- "resulting_rows": 3,
- "cost": 0.746757383,
+ "range_index": "a",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.001755494,
"chosen": true
}
],
"chosen_access_method": {
"type": "range",
- "records": 3,
- "cost": 0.746757383,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.001755494,
"uses_join_buffering": false
}
}
@@ -4249,18 +4567,25 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "range",
- "resulting_rows": 3,
- "cost": 0.746757383,
+ "range_index": "a",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.001755494,
"chosen": true
}
],
"chosen_access_method": {
"type": "range",
- "records": 3,
- "cost": 0.746757383,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.001755494,
"uses_join_buffering": false
}
}
@@ -4268,26 +4593,28 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t0",
"rows_for_plan": 3,
- "cost_for_plan": 1.346757383,
+ "cost_for_plan": 0.001755494,
"rest_of_plan": [
{
- "plan_prefix": ["t0"],
+ "plan_prefix": "t0",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
"access_type": "ref",
"index": "a",
"used_range_estimates": false,
"reason": "not better than ref estimates",
- "rowid_filter_skipped": "cost_factor <= 0",
"rows": 1,
- "cost": 3.001757383,
+ "cost": 0.002376836,
"chosen": true
},
{
@@ -4298,8 +4625,9 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"chosen_access_method": {
"type": "ref",
- "records": 1,
- "cost": 3.001757383,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.002376836,
"uses_join_buffering": false
}
}
@@ -4307,25 +4635,28 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
]
},
{
- "plan_prefix": ["t0"],
+ "plan_prefix": "t0",
"table": "t1",
"rows_for_plan": 3,
- "cost_for_plan": 4.948514767
+ "cost_for_plan": 0.00413233
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 3,
- "cost_for_plan": 1.346757383,
+ "cost_for_plan": 0.001755494,
"rest_of_plan": [
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t0",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
"access_type": "ref",
@@ -4333,9 +4664,8 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"rec_per_key_stats_missing": true,
"used_range_estimates": false,
"reason": "not better than ref estimates",
- "rowid_filter_skipped": "worst/max seeks clipping",
- "rows": 2,
- "cost": 3.003514767,
+ "rows": 1.166666667,
+ "cost": 0.002392836,
"chosen": true
},
{
@@ -4346,8 +4676,9 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
],
"chosen_access_method": {
"type": "ref",
- "records": 2,
- "cost": 3.003514767,
+ "rows_read": 1.166666667,
+ "rows_out": 1.166666667,
+ "cost": 0.002392836,
"uses_join_buffering": false
}
}
@@ -4355,20 +4686,22 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
]
},
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"table": "t0",
- "rows_for_plan": 6,
- "cost_for_plan": 5.55027215,
+ "rows_for_plan": 3.5,
+ "cost_for_plan": 0.00414833,
"pruned_by_cost": true,
- "current_cost": 5.55027215,
- "best_cost": 4.948514767
+ "current_cost": 0.00414833,
+ "best_cost": 0.00413233
}
]
}
]
},
{
- "best_join_order": ["t0", "t1"]
+ "best_join_order": ["t0", "t1"],
+ "rows": 3,
+ "cost": 0.00413233
},
{
"substitute_best_equal": {
@@ -4382,14 +4715,17 @@ explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
"attached_conditions_summary": [
{
"table": "t0",
- "attached": "t0.a < 3 and t0.a is not null"
+ "attached_condition": "t0.a < 3 and t0.a is not null"
},
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -4477,7 +4813,8 @@ explain select * from (select rand() from t1)q {
"table": "t1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
}
]
@@ -4485,23 +4822,30 @@ explain select * from (select rand() from t1)q {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -4509,15 +4853,17 @@ explain select * from (select rand() from t1)q {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953
+ "cost_for_plan": 0.010504815
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 3,
+ "cost": 0.010504815
},
{
"attaching_conditions_to_tables": {
@@ -4525,10 +4871,13 @@ explain select * from (select rand() from t1)q {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -4549,7 +4898,8 @@ explain select * from (select rand() from t1)q {
"table": "<derived2>",
"table_scan": {
"rows": 3,
- "cost": 3
+ "read_cost": 0.01235805,
+ "read_and_compare_cost": 0.012461052
}
}
]
@@ -4557,23 +4907,30 @@ explain select * from (select rand() from t1)q {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "<derived2>",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 3,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.012461052,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 3,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.012461052,
"uses_join_buffering": false
}
}
@@ -4581,15 +4938,17 @@ explain select * from (select rand() from t1)q {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "<derived2>",
"rows_for_plan": 3,
- "cost_for_plan": 3.6
+ "cost_for_plan": 0.012461052
}
]
},
{
- "best_join_order": ["<derived2>"]
+ "best_join_order": ["<derived2>"],
+ "rows": 3,
+ "cost": 0.012461052
},
{
"attaching_conditions_to_tables": {
@@ -4597,10 +4956,13 @@ explain select * from (select rand() from t1)q {
"attached_conditions_summary": [
{
"table": "<derived2>",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -4741,21 +5103,24 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
"table": "t1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
},
{
"table": "t_inner_1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
},
{
"table": "t_inner_2",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
}
]
@@ -4771,23 +5136,30 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -4795,18 +5167,25 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -4814,29 +5193,36 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"rest_of_plan": [
{
- "plan_prefix": ["t_inner_1"],
+ "plan_prefix": "t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -4844,18 +5230,18 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
]
},
{
- "plan_prefix": ["t_inner_1"],
+ "plan_prefix": "t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906
+ "cost_for_plan": 0.022028022
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_2",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"pruned_by_heuristic": true
}
]
@@ -4866,23 +5252,30 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -4890,18 +5283,25 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -4909,18 +5309,25 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -4928,30 +5335,37 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -4959,18 +5373,25 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -4978,30 +5399,37 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
]
},
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"table": "t_inner_1",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906,
+ "cost_for_plan": 0.022028022,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t1", "t_inner_1"],
+ "plan_prefix": "t1,t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.015203373,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.015203373,
"uses_join_buffering": true
}
}
@@ -5009,56 +5437,65 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
]
},
{
- "plan_prefix": ["t1", "t_inner_1"],
+ "plan_prefix": "t1,t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 27,
- "cost_for_plan": 13.81538086,
+ "cost_for_plan": 0.037231395,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 3,
- "read_time": 33.86665039
+ "rows": 3,
+ "cost": 0.136562595
},
{
"strategy": "SJ-Materialization",
- "records": 3,
- "read_time": 7.215380859
+ "rows": 3,
+ "cost": 0.059588485
},
{
"strategy": "DuplicateWeedout",
- "records": 3,
- "read_time": 18.31538086
+ "prefix_row_count": 3,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 9,
+ "rows": 3,
+ "dups_cost": 0.037231395,
+ "write_cost": 0.02548291,
+ "full_lookup_cost": 0.00434619,
+ "total_cost": 0.067060495
},
{
"chosen_strategy": "SJ-Materialization"
}
- ]
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 3,
+ "sj_filtered": 11.11111111
}
]
},
{
- "plan_prefix": ["t1"],
+ "plan_prefix": "t1",
"table": "t_inner_2",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906,
+ "cost_for_plan": 0.022028022,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_2",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
@@ -5080,7 +5517,9 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
]
},
{
- "best_join_order": ["t1", "<subquery2>"]
+ "best_join_order": ["t1", "<subquery2>"],
+ "rows": 3,
+ "cost": 0.059588485
},
{
"substitute_best_equal": {
@@ -5089,31 +5528,35 @@ explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_
}
},
{
- "condition_on_constant_tables": "1",
- "computing_condition": []
- },
- {
"attaching_conditions_to_tables": {
- "attached_conditions_computation": [],
+ "attached_conditions_computation": [
+ {
+ "condition_on_constant_tables": "1",
+ "computing_condition": []
+ }
+ ],
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_1",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_2",
- "attached": null
+ "attached_condition": null
},
{
"table": "<subquery2>",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -5295,42 +5738,48 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
"table": "t_outer_1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
},
{
"table": "t_outer_2",
"table_scan": {
"rows": 9,
- "cost": 2.015380859
+ "read_cost": 0.011155245,
+ "read_and_compare_cost": 0.011443245
}
},
{
"table": "t_inner_2",
"table_scan": {
"rows": 9,
- "cost": 2.015380859
+ "read_cost": 0.011155245,
+ "read_and_compare_cost": 0.011443245
}
},
{
"table": "t_inner_1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
},
{
"table": "t_inner_3",
"table_scan": {
"rows": 9,
- "cost": 2.015380859
+ "read_cost": 0.011155245,
+ "read_and_compare_cost": 0.011443245
}
},
{
"table": "t_inner_4",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
}
]
@@ -5353,23 +5802,30 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -5377,18 +5833,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -5396,18 +5859,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -5415,18 +5885,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -5434,18 +5911,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -5453,18 +5937,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -5472,30 +5963,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_outer_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -5503,18 +6001,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -5522,18 +6027,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -5541,18 +6053,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -5560,18 +6079,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -5579,30 +6105,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_outer_2",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781,
+ "cost_for_plan": 0.02463804,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
"uses_join_buffering": true
}
}
@@ -5610,18 +6143,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
"uses_join_buffering": true
}
}
@@ -5629,18 +6169,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
"uses_join_buffering": true
}
}
@@ -5648,18 +6195,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
"uses_join_buffering": true
}
}
@@ -5667,30 +6221,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_1",
"rows_for_plan": 81,
- "cost_for_plan": 28.22563477,
+ "cost_for_plan": 0.049238529,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
"uses_join_buffering": true
}
}
@@ -5698,18 +6259,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
"uses_join_buffering": true
}
}
@@ -5717,18 +6285,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
"uses_join_buffering": true
}
}
@@ -5736,49 +6311,60 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 729,
- "cost_for_plan": 176.0410156,
+ "cost_for_plan": 0.222053862,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 389.4047852
+ "rows": 27,
+ "cost": 1.23517089
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 289.4410156
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.222053862,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.368747182
},
{
"chosen_strategy": "DuplicateWeedout"
}
],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704,
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
"uses_join_buffering": true
}
}
@@ -5786,18 +6372,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
"uses_join_buffering": true
}
}
@@ -5805,41 +6398,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 307.6461426,
+ "cost_for_plan": 0.403207963,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
"uses_join_buffering": true
}
}
@@ -5847,79 +6436,79 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 455.4615234,
+ "cost_for_plan": 0.664765924,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 668.825293
+ "rows": 27,
+ "cost": 1.579280032
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 568.8615234
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.664765924,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.811459244
},
{
"chosen_strategy": "DuplicateWeedout"
}
- ]
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 340.0563965,
+ "cost_for_plan": 0.448771561,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"table": "t_inner_4",
"rows_for_plan": 243,
- "cost_for_plan": 78.83076172,
+ "cost_for_plan": 0.116820804,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 243
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
"uses_join_buffering": true
}
}
@@ -5927,18 +6516,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 243
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
"uses_join_buffering": true
}
}
@@ -5946,41 +6542,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_4",
"table": "t_inner_2",
"rows_for_plan": 2187,
- "cost_for_plan": 518.2461426,
+ "cost_for_plan": 0.745494255,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_4",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_4,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 2187
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 6.764540577,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 6.764540577,
"uses_join_buffering": true
}
}
@@ -5988,113 +6580,118 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_4",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_4,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 19683,
- "cost_for_plan": 4456.861523,
+ "cost_for_plan": 7.510034832,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 9562.749707
+ "rows": 27,
+ "cost": 28.96624341
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 7413.361523
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 729,
+ "rows": 27,
+ "dups_cost": 7.510034832,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 3.16837251,
+ "total_cost": 10.70775353
},
{
"chosen_strategy": "FirstMatch"
}
],
+ "sj_rows_out": 0.012345679,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 0.137174211,
"pruned_by_cost": true,
- "current_cost": 9562.749707,
- "best_cost": 568.8615234
+ "current_cost": 28.96624341,
+ "best_cost": 0.811459244
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 2187,
- "cost_for_plan": 518.2461426,
+ "cost_for_plan": 0.745494255,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 176.0410156,
+ "cost_for_plan": 0.222053862,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": "min_read_time"
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_2",
"rows_for_plan": 243,
- "cost_for_plan": 60.63588867,
+ "cost_for_plan": 0.075081543,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 28.22563477,
+ "cost_for_plan": 0.049238529,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 60.63588867,
+ "cost_for_plan": 0.075081543,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_1",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906,
+ "cost_for_plan": 0.022028022,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
"uses_join_buffering": true
}
}
@@ -6102,18 +6699,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
"uses_join_buffering": true
}
}
@@ -6121,18 +6725,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.015203373,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.015203373,
"uses_join_buffering": true
}
}
@@ -6140,18 +6751,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
"uses_join_buffering": true
}
}
@@ -6159,30 +6777,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_outer_2",
"rows_for_plan": 81,
- "cost_for_plan": 24.62563477,
+ "cost_for_plan": 0.046471353,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
"uses_join_buffering": true
}
}
@@ -6190,18 +6815,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
"uses_join_buffering": true
}
}
@@ -6209,18 +6841,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
"uses_join_buffering": true
}
}
@@ -6228,44 +6867,55 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
"table": "t_inner_2",
"rows_for_plan": 729,
- "cost_for_plan": 172.4410156,
+ "cost_for_plan": 0.219286686,
"semijoin_strategy_choice": [
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 285.8410156
+ "prefix_row_count": 3,
+ "tmp_table_rows": 9,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.219286686,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.365980006
},
{
"chosen_strategy": "DuplicateWeedout"
}
],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704,
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
"uses_join_buffering": true
}
}
@@ -6273,18 +6923,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
"uses_join_buffering": true
}
}
@@ -6292,41 +6949,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 304.0461426,
+ "cost_for_plan": 0.400440787,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
"uses_join_buffering": true
}
}
@@ -6334,79 +6987,79 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 451.8615234,
+ "cost_for_plan": 0.661998748,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 665.225293
+ "rows": 27,
+ "cost": 1.576512856
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 565.2615234
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.661998748,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.808692068
},
{
"chosen_strategy": "DuplicateWeedout"
}
- ]
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 336.4563965,
+ "cost_for_plan": 0.446004385,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
"table": "t_inner_4",
"rows_for_plan": 243,
- "cost_for_plan": 75.23076172,
+ "cost_for_plan": 0.114053628,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 243
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
"uses_join_buffering": true
}
}
@@ -6414,18 +7067,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 243
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
"uses_join_buffering": true
}
}
@@ -6433,41 +7093,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4",
"table": "t_inner_2",
"rows_for_plan": 2187,
- "cost_for_plan": 514.6461426,
+ "cost_for_plan": 0.742727079,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_4",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 2187
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 6.764540577,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 6.764540577,
"uses_join_buffering": true
}
}
@@ -6475,96 +7131,110 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_4",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 19683,
- "cost_for_plan": 4453.261523,
+ "cost_for_plan": 7.507267656,
"semijoin_strategy_choice": [
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 7409.761523
+ "prefix_row_count": 3,
+ "tmp_table_rows": 9,
+ "sj_inner_fanout": 729,
+ "rows": 27,
+ "dups_cost": 7.507267656,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 3.16837251,
+ "total_cost": 10.70498636
},
{
"chosen_strategy": "DuplicateWeedout"
}
],
+ "sj_rows_out": 0.012345679,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 0.137174211,
"pruned_by_cost": true,
- "current_cost": 7409.761523,
- "best_cost": 565.2615234
+ "current_cost": 10.70498636,
+ "best_cost": 0.808692068
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_outer_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 2187,
- "cost_for_plan": 514.6461426,
+ "cost_for_plan": 0.742727079,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 172.4410156,
+ "cost_for_plan": 0.219286686,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": "min_read_time"
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 81,
- "cost_for_plan": 24.62563477,
+ "cost_for_plan": 0.046471353,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 3,
- "read_time": 44.75893555
+ "rows": 3,
+ "cost": 0.145008465
},
{
"strategy": "DuplicateWeedout",
- "records": 3,
- "read_time": 37.22563477
+ "prefix_row_count": 3,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 3,
+ "dups_cost": 0.046471353,
+ "write_cost": 0.02548291,
+ "full_lookup_cost": 0.01303857,
+ "total_cost": 0.084992833
},
{
"chosen_strategy": "DuplicateWeedout"
}
],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 3,
+ "sj_filtered": 3.703703704,
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
"uses_join_buffering": true
}
}
@@ -6572,18 +7242,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.012618795,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.012618795,
"uses_join_buffering": true
}
}
@@ -6591,18 +7268,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
"uses_join_buffering": true
}
}
@@ -6610,35 +7294,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"table": "t_outer_2",
"rows_for_plan": 27,
- "cost_for_plan": 44.64101563,
+ "cost_for_plan": 0.102412822,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
"uses_join_buffering": true
}
}
@@ -6646,18 +7332,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
"uses_join_buffering": true
}
}
@@ -6665,41 +7358,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 62.84614258,
+ "cost_for_plan": 0.136873603,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
"uses_join_buffering": true
}
}
@@ -6707,79 +7396,79 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 210.6615234,
+ "cost_for_plan": 0.398431564,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 424.025293
+ "rows": 27,
+ "cost": 1.312945672
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 324.0615234
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.398431564,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.545124884
},
{
"chosen_strategy": "DuplicateWeedout"
}
- ]
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 95.25639648,
+ "cost_for_plan": 0.182437201,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"table": "t_inner_4",
"rows_for_plan": 9,
- "cost_for_plan": 41.03076172,
+ "cost_for_plan": 0.097611628,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
"uses_join_buffering": true
}
}
@@ -6787,18 +7476,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
"uses_join_buffering": true
}
}
@@ -6806,41 +7502,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4",
"table": "t_outer_2",
"rows_for_plan": 81,
- "cost_for_plan": 59.24614258,
+ "cost_for_plan": 0.131915251,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
"uses_join_buffering": true
}
}
@@ -6848,79 +7540,84 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 207.0615234,
+ "cost_for_plan": 0.393473212,
"semijoin_strategy_choice": [
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 320.4615234
+ "prefix_row_count": 3,
+ "tmp_table_rows": 9,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.393473212,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.540166532
},
{
"chosen_strategy": "DuplicateWeedout"
}
- ]
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 81,
- "cost_for_plan": 59.24614258,
+ "cost_for_plan": 0.131915251,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 27,
- "cost_for_plan": 44.64101563,
+ "cost_for_plan": 0.102412822,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_inner_4",
"rows_for_plan": 27,
- "cost_for_plan": 13.81538086,
+ "cost_for_plan": 0.037231395,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
"uses_join_buffering": true
}
}
@@ -6928,18 +7625,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
"uses_join_buffering": true
}
}
@@ -6947,18 +7651,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
"uses_join_buffering": true
}
}
@@ -6966,35 +7677,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
"table": "t_outer_2",
"rows_for_plan": 243,
- "cost_for_plan": 64.43076172,
+ "cost_for_plan": 0.102465336,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_4",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 243
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
"uses_join_buffering": true
}
}
@@ -7002,18 +7715,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 243
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
"uses_join_buffering": true
}
}
@@ -7021,128 +7741,118 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_4",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4,t_outer_2",
"table": "t_inner_2",
"rows_for_plan": 2187,
- "cost_for_plan": 503.8461426,
+ "cost_for_plan": 0.731138787,
"semijoin_strategy_choice": [],
"pruned_by_cost": true,
- "current_cost": 503.8461426,
- "best_cost": 320.4615234
+ "current_cost": 0.731138787,
+ "best_cost": 0.540166532
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_4",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 2187,
- "cost_for_plan": 503.8461426,
+ "cost_for_plan": 0.731138787,
"semijoin_strategy_choice": [],
"pruned_by_cost": true,
- "current_cost": 503.8461426,
- "best_cost": 320.4615234
+ "current_cost": 0.731138787,
+ "best_cost": 0.540166532
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
"table": "t_inner_2",
"rows_for_plan": 243,
- "cost_for_plan": 64.43076172,
+ "cost_for_plan": 0.102465336,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 64.43076172,
+ "cost_for_plan": 0.102465336,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_inner_3",
"rows_for_plan": 81,
- "cost_for_plan": 24.62563477,
+ "cost_for_plan": 0.046471353,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_2",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781,
+ "cost_for_plan": 0.02463804,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_4",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906,
+ "cost_for_plan": 0.022028022,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_3",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781,
+ "cost_for_plan": 0.02463804,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_outer_2",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_2",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_4",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_3",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
@@ -7166,7 +7876,9 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
"t_inner_4",
"t_outer_2",
"t_inner_3"
- ]
+ ],
+ "rows": 27,
+ "cost": 0.540166532
},
{
"substitute_best_equal": {
@@ -7180,30 +7892,33 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
"attached_conditions_summary": [
{
"table": "t_outer_1",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_1",
- "attached": "t_inner_1.a = t_outer_1.a"
+ "attached_condition": "t_inner_1.a = t_outer_1.a"
},
{
"table": "t_inner_2",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_4",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_outer_2",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_3",
- "attached": "t_inner_3.a = t_outer_2.a"
+ "attached_condition": "t_inner_3.a = t_outer_2.a"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -7221,8 +7936,8 @@ explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_
t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t_outer_1 ALL NULL NULL NULL NULL 3
-1 PRIMARY t_outer_2 ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join)
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY t_outer_2 ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join)
1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t_inner_1 ALL NULL NULL NULL NULL 3
2 MATERIALIZED t_inner_2 ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join)
@@ -7386,42 +8101,48 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
"table": "t_outer_1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
},
{
"table": "t_outer_2",
"table_scan": {
"rows": 9,
- "cost": 2.015380859
+ "read_cost": 0.011155245,
+ "read_and_compare_cost": 0.011443245
}
},
{
"table": "t_inner_2",
"table_scan": {
"rows": 9,
- "cost": 2.015380859
+ "read_cost": 0.011155245,
+ "read_and_compare_cost": 0.011443245
}
},
{
"table": "t_inner_1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
},
{
"table": "t_inner_3",
"table_scan": {
"rows": 9,
- "cost": 2.015380859
+ "read_cost": 0.011155245,
+ "read_and_compare_cost": 0.011443245
}
},
{
"table": "t_inner_4",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
}
]
@@ -7442,23 +8163,30 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -7466,18 +8194,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -7485,29 +8220,36 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"rest_of_plan": [
{
- "plan_prefix": ["t_inner_1"],
+ "plan_prefix": "t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -7515,18 +8257,18 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_inner_1"],
+ "plan_prefix": "t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781
+ "cost_for_plan": 0.02463804
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_2",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"pruned_by_heuristic": true
}
]
@@ -7534,23 +8276,30 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -7558,18 +8307,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -7577,29 +8333,36 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_4",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"rest_of_plan": [
{
- "plan_prefix": ["t_inner_4"],
+ "plan_prefix": "t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -7607,18 +8370,18 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_inner_4"],
+ "plan_prefix": "t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781
+ "cost_for_plan": 0.02463804
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_3",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"pruned_by_heuristic": true
}
]
@@ -7629,23 +8392,30 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -7653,18 +8423,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -7672,18 +8449,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -7691,18 +8475,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -7710,18 +8501,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -7729,18 +8527,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.011443245,
"uses_join_buffering": false
}
}
@@ -7748,30 +8553,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_outer_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -7779,18 +8591,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -7798,18 +8617,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -7817,18 +8643,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.011523207,
"uses_join_buffering": true
}
}
@@ -7836,18 +8669,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.014133225,
"uses_join_buffering": true
}
}
@@ -7855,30 +8695,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_outer_2",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781,
+ "cost_for_plan": 0.02463804,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
"uses_join_buffering": true
}
}
@@ -7886,18 +8733,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_1",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
"uses_join_buffering": true
}
}
@@ -7905,18 +8759,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.024600489,
"uses_join_buffering": true
}
}
@@ -7924,18 +8785,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.050443503,
"uses_join_buffering": true
}
}
@@ -7943,30 +8811,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_1",
"rows_for_plan": 81,
- "cost_for_plan": 28.22563477,
+ "cost_for_plan": 0.049238529,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
"uses_join_buffering": true
}
}
@@ -7974,18 +8849,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
"uses_join_buffering": true
}
}
@@ -7993,18 +8875,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
"uses_join_buffering": true
}
}
@@ -8012,54 +8901,65 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 729,
- "cost_for_plan": 176.0410156,
+ "cost_for_plan": 0.222053862,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 389.4047852
+ "rows": 27,
+ "cost": 1.23517089
},
{
"strategy": "SJ-Materialization",
- "records": 27,
- "read_time": 16.74101562
+ "rows": 27,
+ "cost": 0.083958496
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 289.4410156
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.222053862,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.368747182
},
{
"chosen_strategy": "SJ-Materialization"
}
],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704,
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
"uses_join_buffering": true
}
}
@@ -8067,18 +8967,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
"uses_join_buffering": true
}
}
@@ -8086,41 +8993,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 34.94614258,
+ "cost_for_plan": 0.118419277,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
"uses_join_buffering": true
}
}
@@ -8128,135 +9031,132 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 182.7615234,
+ "cost_for_plan": 0.379977238,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 27,
- "read_time": 396.125293
+ "rows": 27,
+ "cost": 1.294491346
},
{
"strategy": "SJ-Materialization",
- "records": 27,
- "read_time": 23.46152344
+ "rows": 27,
+ "cost": 0.143278952
},
{
"strategy": "DuplicateWeedout",
- "records": 27,
- "read_time": 296.1615234
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.379977238,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.526670558
},
{
"chosen_strategy": "SJ-Materialization"
}
- ]
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704
}
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_outer_2",
- "t_inner_1",
- "t_inner_2"
- ],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 67.35639648,
+ "cost_for_plan": 0.163982875,
"semijoin_strategy_choice": [],
"pruned_by_cost": true,
- "current_cost": 67.35639648,
- "best_cost": 23.46152344
+ "current_cost": 0.163982875,
+ "best_cost": 0.143278952
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"table": "t_inner_4",
"rows_for_plan": 243,
- "cost_for_plan": 78.83076172,
+ "cost_for_plan": 0.116820804,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 78.83076172,
- "best_cost": 23.46152344
+ "pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_outer_2,t_inner_1",
"table": "t_inner_3",
"rows_for_plan": 729,
- "cost_for_plan": 176.0410156,
+ "cost_for_plan": 0.222053862,
"semijoin_strategy_choice": [],
"pruned_by_cost": true,
- "current_cost": 176.0410156,
- "best_cost": 23.46152344
+ "current_cost": 0.222053862,
+ "best_cost": 0.143278952
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_2",
"rows_for_plan": 243,
- "cost_for_plan": 60.63588867,
+ "cost_for_plan": 0.075081543,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 60.63588867,
- "best_cost": 23.46152344
+ "pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 28.22563477,
+ "cost_for_plan": 0.049238529,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 28.22563477,
- "best_cost": 23.46152344
+ "pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1", "t_outer_2"],
+ "plan_prefix": "t_outer_1,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 60.63588867,
+ "cost_for_plan": 0.075081543,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 60.63588867,
- "best_cost": 23.46152344
+ "pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_1",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906,
+ "cost_for_plan": 0.022028022,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
"uses_join_buffering": true
}
}
@@ -8264,18 +9164,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
"uses_join_buffering": true
}
}
@@ -8283,18 +9190,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.015203373,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.015203373,
"uses_join_buffering": true
}
}
@@ -8302,18 +9216,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.024443331,
"uses_join_buffering": true
}
}
@@ -8321,59 +9242,351 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_outer_2",
"rows_for_plan": 81,
- "cost_for_plan": 24.62563477,
+ "cost_for_plan": 0.046471353,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 24.62563477,
- "best_cost": 23.46152344
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_inner_2",
+ "plan_details": {
+ "record_count": 81
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "uses_join_buffering": true
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t_inner_4",
+ "plan_details": {
+ "record_count": 81
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.067582275,
+ "uses_join_buffering": true
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.172815333,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
+ "table": "t_inner_2",
+ "rows_for_plan": 729,
+ "cost_for_plan": 0.219286686,
+ "semijoin_strategy_choice": [
+ {
+ "strategy": "DuplicateWeedout",
+ "prefix_row_count": 3,
+ "tmp_table_rows": 9,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.219286686,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.365980006
+ },
+ {
+ "chosen_strategy": "DuplicateWeedout"
+ }
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704,
+ "pruned_by_cost": true,
+ "current_cost": 0.365980006,
+ "best_cost": 0.143278952
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
+ "table": "t_inner_4",
+ "rows_for_plan": 243,
+ "cost_for_plan": 0.114053628,
+ "semijoin_strategy_choice": [],
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_inner_2",
+ "plan_details": {
+ "record_count": 243
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "uses_join_buffering": true
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t_inner_3",
+ "plan_details": {
+ "record_count": 243
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4",
+ "table": "t_inner_2",
+ "rows_for_plan": 2187,
+ "cost_for_plan": 0.742727079,
+ "semijoin_strategy_choice": [],
+ "pruned_by_cost": true,
+ "current_cost": 0.742727079,
+ "best_cost": 0.143278952
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4",
+ "table": "t_inner_3",
+ "rows_for_plan": 2187,
+ "cost_for_plan": 0.742727079,
+ "semijoin_strategy_choice": [
+ {
+ "strategy": "SJ-Materialization",
+ "rows": 81,
+ "cost": 0.116338225
+ },
+ {
+ "chosen_strategy": "SJ-Materialization"
+ }
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 81,
+ "sj_filtered": 3.703703704,
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4,t_inner_3",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_inner_2",
+ "plan_details": {
+ "record_count": 81
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2,t_inner_4,t_inner_3",
+ "table": "t_inner_2",
+ "rows_for_plan": 729,
+ "cost_for_plan": 0.377896186,
+ "semijoin_strategy_choice": [
+ {
+ "strategy": "DuplicateWeedout",
+ "prefix_row_count": 3,
+ "tmp_table_rows": 9,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 1.00428504,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 1.15097836
+ },
+ {
+ "chosen_strategy": "DuplicateWeedout"
+ }
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704,
+ "pruned_by_cost": true,
+ "current_cost": 1.15097836,
+ "best_cost": 0.143278952
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_outer_2",
+ "table": "t_inner_3",
+ "rows_for_plan": 729,
+ "cost_for_plan": 0.219286686,
+ "semijoin_strategy_choice": [],
+ "pruned_by_cost": true,
+ "current_cost": 0.219286686,
+ "best_cost": 0.143278952
+ }
+ ]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_inner_2",
"rows_for_plan": 81,
- "cost_for_plan": 24.62563477,
+ "cost_for_plan": 0.046471353,
"semijoin_strategy_choice": [
{
"strategy": "FirstMatch",
- "records": 3,
- "read_time": 44.75893555
+ "rows": 3,
+ "cost": 0.145008465
},
{
"strategy": "SJ-Materialization",
- "records": 3,
- "read_time": 8.125634766
+ "rows": 3,
+ "cost": 0.065137975
},
{
"strategy": "DuplicateWeedout",
- "records": 3,
- "read_time": 37.22563477
+ "prefix_row_count": 3,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 3,
+ "dups_cost": 0.046471353,
+ "write_cost": 0.02548291,
+ "full_lookup_cost": 0.01303857,
+ "total_cost": 0.084992833
},
{
"chosen_strategy": "SJ-Materialization"
}
],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 3,
+ "sj_filtered": 3.703703704,
"rest_of_plan": [
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
"uses_join_buffering": true
}
}
@@ -8381,18 +9594,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.012618795,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.012618795,
"uses_join_buffering": true
}
}
@@ -8400,18 +9620,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 3
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.017419989,
"uses_join_buffering": true
}
}
@@ -8419,35 +9646,37 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"table": "t_outer_2",
"rows_for_plan": 27,
- "cost_for_plan": 15.54101562,
+ "cost_for_plan": 0.082557964,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_inner_4",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "access_type": "scan_with_join_cache",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.034460781,
"uses_join_buffering": true
}
}
@@ -8455,18 +9684,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.080024379,
"uses_join_buffering": true
}
}
@@ -8474,67 +9710,124 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2",
"table": "t_inner_4",
"rows_for_plan": 81,
- "cost_for_plan": 33.74614258,
+ "cost_for_plan": 0.117018745,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 33.74614258,
- "best_cost": 23.46152344
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2,t_inner_4",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2,t_inner_4",
+ "table": "t_inner_3",
+ "rows_for_plan": 729,
+ "cost_for_plan": 0.378576706,
+ "semijoin_strategy_choice": [
+ {
+ "strategy": "FirstMatch",
+ "rows": 27,
+ "cost": 1.293090814
+ },
+ {
+ "strategy": "SJ-Materialization",
+ "rows": 27,
+ "cost": 0.14187842
+ },
+ {
+ "strategy": "DuplicateWeedout",
+ "prefix_row_count": 27,
+ "tmp_table_rows": 1,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.378576706,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.525270026
+ },
+ {
+ "chosen_strategy": "SJ-Materialization"
+ }
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704
+ }
+ ]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_outer_2"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_outer_2",
"table": "t_inner_3",
"rows_for_plan": 243,
- "cost_for_plan": 66.15639648,
+ "cost_for_plan": 0.162582343,
"semijoin_strategy_choice": [],
"pruned_by_cost": true,
- "current_cost": 66.15639648,
- "best_cost": 23.46152344
+ "current_cost": 0.162582343,
+ "best_cost": 0.14187842
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"table": "t_inner_4",
"rows_for_plan": 9,
- "cost_for_plan": 11.93076172,
+ "cost_for_plan": 0.07775677,
"semijoin_strategy_choice": [],
"rest_of_plan": [
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t_outer_2",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
"uses_join_buffering": true
}
}
@@ -8542,18 +9835,25 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_access_path": {
"table": "t_inner_3",
+ "plan_details": {
+ "record_count": 9
+ },
"considered_access_paths": [
{
- "access_type": "scan",
- "resulting_rows": 9,
- "cost": 2.015380859,
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 9,
- "cost": 2.015380859,
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.034303623,
"uses_join_buffering": true
}
}
@@ -8561,130 +9861,360 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4",
"table": "t_outer_2",
"rows_for_plan": 81,
- "cost_for_plan": 30.14614258,
+ "cost_for_plan": 0.112060393,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 30.14614258,
- "best_cost": 23.46152344
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4,t_outer_2",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_inner_3",
+ "plan_details": {
+ "record_count": 81
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.261557961,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4,t_outer_2",
+ "table": "t_inner_3",
+ "rows_for_plan": 729,
+ "cost_for_plan": 0.373618354,
+ "semijoin_strategy_choice": [
+ {
+ "strategy": "DuplicateWeedout",
+ "prefix_row_count": 3,
+ "tmp_table_rows": 9,
+ "sj_inner_fanout": 27,
+ "rows": 27,
+ "dups_cost": 0.373618354,
+ "write_cost": 0.02934619,
+ "full_lookup_cost": 0.11734713,
+ "total_cost": 0.520311674
+ },
+ {
+ "chosen_strategy": "DuplicateWeedout"
+ }
+ ],
+ "sj_rows_out": 0.333333333,
+ "sj_rows_for_plan": 27,
+ "sj_filtered": 3.703703704,
+ "pruned_by_cost": true,
+ "current_cost": 0.520311674,
+ "best_cost": 0.14187842
+ }
+ ]
},
{
- "plan_prefix": [
- "t_outer_1",
- "t_inner_1",
- "t_inner_2",
- "t_inner_4"
- ],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2,t_inner_4",
"table": "t_inner_3",
"rows_for_plan": 81,
- "cost_for_plan": 30.14614258,
+ "cost_for_plan": 0.112060393,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 30.14614258,
- "best_cost": 23.46152344
+ "pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_2",
"table": "t_inner_3",
"rows_for_plan": 27,
- "cost_for_plan": 15.54101562,
+ "cost_for_plan": 0.082557964,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_inner_4",
"rows_for_plan": 27,
- "cost_for_plan": 13.81538086,
+ "cost_for_plan": 0.037231395,
"semijoin_strategy_choice": [],
- "pruned_by_heuristic": true
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_outer_2",
+ "plan_details": {
+ "record_count": 27
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "uses_join_buffering": true
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t_inner_2",
+ "plan_details": {
+ "record_count": 27
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "uses_join_buffering": true
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t_inner_3",
+ "plan_details": {
+ "record_count": 27
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.065233941,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
+ "table": "t_outer_2",
+ "rows_for_plan": 243,
+ "cost_for_plan": 0.102465336,
+ "semijoin_strategy_choice": [],
+ "rest_of_plan": [
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4,t_outer_2",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t_inner_2",
+ "plan_details": {
+ "record_count": 243
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "uses_join_buffering": true
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t_inner_3",
+ "plan_details": {
+ "record_count": 243
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 9,
+ "rows_after_filter": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 9,
+ "rows_out": 9,
+ "cost": 0.628673451,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4,t_outer_2",
+ "table": "t_inner_2",
+ "rows_for_plan": 2187,
+ "cost_for_plan": 0.731138787,
+ "semijoin_strategy_choice": [],
+ "pruned_by_cost": true,
+ "current_cost": 0.731138787,
+ "best_cost": 0.14187842
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4,t_outer_2",
+ "table": "t_inner_3",
+ "rows_for_plan": 2187,
+ "cost_for_plan": 0.731138787,
+ "semijoin_strategy_choice": [],
+ "pruned_by_cost": true,
+ "current_cost": 0.731138787,
+ "best_cost": 0.14187842
+ }
+ ]
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
+ "table": "t_inner_2",
+ "rows_for_plan": 243,
+ "cost_for_plan": 0.102465336,
+ "semijoin_strategy_choice": [],
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": "t_outer_1,t_inner_1,t_inner_4",
+ "table": "t_inner_3",
+ "rows_for_plan": 243,
+ "cost_for_plan": 0.102465336,
+ "semijoin_strategy_choice": [],
+ "pruned_by_heuristic": true
+ }
+ ]
},
{
- "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "plan_prefix": "t_outer_1,t_inner_1",
"table": "t_inner_3",
"rows_for_plan": 81,
- "cost_for_plan": 24.62563477,
+ "cost_for_plan": 0.046471353,
"semijoin_strategy_choice": [],
- "pruned_by_cost": true,
- "current_cost": 24.62563477,
- "best_cost": 23.46152344
+ "pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_2",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781,
+ "cost_for_plan": 0.02463804,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_4",
"rows_for_plan": 9,
- "cost_for_plan": 6.410253906,
+ "cost_for_plan": 0.022028022,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": ["t_outer_1"],
+ "plan_prefix": "t_outer_1",
"table": "t_inner_3",
"rows_for_plan": 27,
- "cost_for_plan": 10.02050781,
+ "cost_for_plan": 0.02463804,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_outer_2",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_2",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_4",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953,
+ "cost_for_plan": 0.010504815,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t_inner_3",
"rows_for_plan": 9,
- "cost_for_plan": 3.815380859,
+ "cost_for_plan": 0.011443245,
"semijoin_strategy_choice": [],
"pruned_by_heuristic": true
}
@@ -8719,10 +10249,12 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
{
"best_join_order": [
"t_outer_1",
- "t_outer_2",
"<subquery2>",
+ "t_outer_2",
"<subquery3>"
- ]
+ ],
+ "rows": 27,
+ "cost": 0.14187842
},
{
"substitute_best_equal": {
@@ -8731,47 +10263,51 @@ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
}
},
{
- "condition_on_constant_tables": "1",
- "computing_condition": []
- },
- {
"attaching_conditions_to_tables": {
- "attached_conditions_computation": [],
+ "attached_conditions_computation": [
+ {
+ "condition_on_constant_tables": "1",
+ "computing_condition": []
+ }
+ ],
"attached_conditions_summary": [
{
"table": "t_outer_1",
- "attached": null
- },
- {
- "table": "t_outer_2",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_1",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_2",
- "attached": null
+ "attached_condition": null
},
{
"table": "<subquery2>",
- "attached": null
+ "attached_condition": null
+ },
+ {
+ "table": "t_outer_2",
+ "attached_condition": null
},
{
"table": "t_inner_4",
- "attached": null
+ "attached_condition": null
},
{
"table": "t_inner_3",
- "attached": null
+ "attached_condition": null
},
{
"table": "<subquery3>",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -8826,7 +10362,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 1,
- "cost": 0.345829876,
+ "cost": 0.001478954,
"chosen": true
}
],
@@ -8855,7 +10391,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 107,
- "cost": 21.63379668,
+ "cost": 0.016135574,
"chosen": true
}
],
@@ -8887,7 +10423,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1000,
- "cost": 1203.877243,
+ "cost": 1.235690484,
"chosen": true
}
],
@@ -8927,7 +10463,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 4,
- "cost": 4.948710032,
+ "cost": 0.00627616,
"chosen": true
}
],
@@ -8961,7 +10497,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.346171589,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -8990,7 +10526,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.346171589,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9027,7 +10563,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345927508,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9057,7 +10593,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345878692,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9087,7 +10623,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345927508,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9120,7 +10656,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345878692,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9156,7 +10692,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.394255553,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9190,7 +10726,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 2,
- "cost": 2.546855016,
+ "cost": 0.003808422,
"chosen": true
}
],
@@ -9214,6 +10750,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": 1.235690484,
"nested_loop": [
{
"table": {
@@ -9223,7 +10760,9 @@ EXPLAIN
"key": "start_date",
"key_length": "8",
"used_key_parts": ["start_date", "end_date"],
+ "loops": 1,
"rows": 1000,
+ "cost": 1.235690484,
"filtered": 100,
"index_condition": "t1.start_date >= '2019-02-10' and t1.end_date < '2019-04-01'"
}
@@ -9245,7 +10784,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1000,
- "cost": 1203.877243,
+ "cost": 1.235690484,
"chosen": true
}
],
@@ -9295,28 +10834,35 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
[
[
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "A",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 5,
- "cost": 3.017089844,
+ "rows": 10,
+ "rows_after_filter": 5,
+ "rows_out": 5,
+ "cost": 0.01159965,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 5,
- "cost": 3.017089844,
+ "rows_read": 5,
+ "rows_out": 5,
+ "cost": 0.01159965,
"uses_join_buffering": false
}
}
@@ -9325,20 +10871,28 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"best_access_path":
{
"table": "B",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 800,
- "cost": 44.19726562,
+ "rows": 1000,
+ "rows_after_filter": 800,
+ "rows_out": 800,
+ "cost": 0.1669214,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 800,
- "cost": 44.19726562,
+ "rows_read": 800,
+ "rows_out": 800,
+ "cost": 0.1669214,
"uses_join_buffering": false
}
}
@@ -9346,36 +10900,42 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "A",
"rows_for_plan": 5,
- "cost_for_plan": 4.017089844,
+ "cost_for_plan": 0.01159965,
"rest_of_plan":
[
{
- "plan_prefix":
- ["A"],
+ "plan_prefix": "A",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "B",
+ "plan_details":
+ {
+ "record_count": 5
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 800,
- "cost": 220.9863281,
+ "rows": 1000,
+ "rows_after_filter": 800,
+ "rows_out": 800,
+ "cost": 0.834607,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 800,
- "cost": 220.9863281,
+ "rows_read": 800,
+ "rows_out": 800,
+ "cost": 0.834607,
"uses_join_buffering": false
}
}
@@ -9383,20 +10943,18 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- ["A"],
+ "plan_prefix": "A",
"table": "B",
"rows_for_plan": 4000,
- "cost_for_plan": 1025.003418
+ "cost_for_plan": 0.84620665
}
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "B",
"rows_for_plan": 800,
- "cost_for_plan": 204.1972656,
+ "cost_for_plan": 0.1669214,
"pruned_by_heuristic": true
}
]
@@ -9412,28 +10970,35 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
[
[
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "A",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.017089844,
+ "rows": 10,
+ "rows_after_filter": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 10,
- "cost": 2.017089844,
+ "rows_read": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
"uses_join_buffering": false
}
}
@@ -9442,20 +11007,28 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"best_access_path":
{
"table": "B",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 800,
- "cost": 44.19726562,
+ "rows": 1000,
+ "rows_after_filter": 800,
+ "rows_out": 800,
+ "cost": 0.1669214,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 800,
- "cost": 44.19726562,
+ "rows_read": 800,
+ "rows_out": 800,
+ "cost": 0.1669214,
"uses_join_buffering": false
}
}
@@ -9463,22 +11036,24 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "A",
"rows_for_plan": 10,
- "cost_for_plan": 4.017089844,
+ "cost_for_plan": 0.01159965,
"rest_of_plan":
[
{
- "plan_prefix":
- ["A"],
+ "plan_prefix": "A",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "B",
+ "plan_details":
+ {
+ "record_count": 10
+ },
"considered_access_paths":
[
{
@@ -9486,23 +11061,22 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"index": "b",
"used_range_estimates": false,
"reason": "not available",
- "rowid_filter_skipped": "cost_factor <= 0",
"rows": 1,
- "cost": 20.00585794,
+ "cost": 0.01901531,
"chosen": true
},
{
- "access_type": "scan",
- "resulting_rows": 800,
- "cost": 44.19726562,
- "chosen": false
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
}
],
"chosen_access_method":
{
"type": "ref",
- "records": 1,
- "cost": 20.00585794,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.01901531,
"uses_join_buffering": false
}
}
@@ -9510,25 +11084,26 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- ["A"],
+ "plan_prefix": "A",
"table": "B",
"rows_for_plan": 10,
- "cost_for_plan": 26.02294779,
+ "cost_for_plan": 0.03061496,
+ "pushdown_cond_selectivity": 0.8,
+ "filtered": 80,
+ "rows_out": 0.8,
"selectivity": 0.8,
"estimated_join_cardinality": 8
}
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "B",
"rows_for_plan": 800,
- "cost_for_plan": 204.1972656,
+ "cost_for_plan": 0.1669214,
"pruned_by_cost": true,
- "current_cost": 204.1972656,
- "best_cost": 26.02294779
+ "current_cost": 0.1669214,
+ "best_cost": 0.03061496
}
]
]
@@ -9557,7 +11132,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.357887479,
+ "cost": 0.002574553,
"chosen": true
}
],
@@ -9617,7 +11192,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 1,
- "cost": 0.345829876,
+ "cost": 0.001478954,
"chosen": true
}
]
@@ -9640,28 +11215,35 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
[
[
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "t1",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "rows": 10,
+ "rows_after_filter": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 10,
+ "rows_out": 10,
+ "cost": 0.01159965,
"uses_join_buffering": false
}
}
@@ -9670,12 +11252,19 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"best_access_path":
{
"table": "t2",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
"access_type": "scan",
- "resulting_rows": 100,
- "cost": 2.219726562,
+ "rows": 100,
+ "rows_after_filter": 100,
+ "rows_out": 100,
+ "cost": 0.0256761,
+ "index_only": false,
"chosen": true,
"use_tmp_table": true
}
@@ -9683,8 +11272,9 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"chosen_access_method":
{
"type": "scan",
- "records": 100,
- "cost": 2.219726562,
+ "rows_read": 100,
+ "rows_out": 100,
+ "cost": 0.0256761,
"uses_join_buffering": false
}
}
@@ -9692,22 +11282,24 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 10,
- "cost_for_plan": 4.021972656,
+ "cost_for_plan": 0.01159965,
"rest_of_plan":
[
{
- "plan_prefix":
- ["t1"],
+ "plan_prefix": "t1",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "t2",
+ "plan_details":
+ {
+ "record_count": 10
+ },
"considered_access_paths":
[
{
@@ -9715,23 +11307,22 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"index": "a",
"used_range_estimates": false,
"reason": "not available",
- "rowid_filter_skipped": "cost_factor <= 0",
"rows": 1,
- "cost": 20.00585794,
+ "cost": 0.01840091,
"chosen": true
},
{
- "access_type": "scan",
- "resulting_rows": 100,
- "cost": 2.219726562,
- "chosen": false
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
}
],
"chosen_access_method":
{
"type": "ref",
- "records": 1,
- "cost": 20.00585794,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.01840091,
"uses_join_buffering": false
}
}
@@ -9739,32 +11330,33 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- ["t1"],
+ "plan_prefix": "t1",
"table": "t2",
"rows_for_plan": 10,
- "cost_for_plan": 26.0278306,
- "cost_for_sorting": 10
+ "cost_for_plan": 0.03000056,
+ "cost_for_sorting": 0.006368384
}
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "t2",
"rows_for_plan": 100,
- "cost_for_plan": 22.21972656,
+ "cost_for_plan": 0.0256761,
"rest_of_plan":
[
{
- "plan_prefix":
- ["t2"],
+ "plan_prefix": "t2",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "t1",
+ "plan_details":
+ {
+ "record_count": 100
+ },
"considered_access_paths":
[
{
@@ -9772,23 +11364,26 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
"index": "a",
"used_range_estimates": false,
"reason": "not available",
- "rowid_filter_skipped": "cost_factor <= 0",
"rows": 1,
- "cost": 200.0585794,
+ "cost": 0.1821659,
"chosen": true
},
{
- "access_type": "scan",
- "resulting_rows": 10,
- "cost": 2.021972656,
+ "access_type": "scan_with_join_cache",
+ "rows": 10,
+ "rows_after_filter": 10,
+ "rows_out": 1,
+ "cost": 0.11055225,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method":
{
"type": "scan",
- "records": 10,
- "cost": 2.021972656,
+ "rows_read": 10,
+ "rows_out": 1,
+ "cost": 0.11055225,
"uses_join_buffering": true
}
}
@@ -9796,14 +11391,13 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans'))
]
},
{
- "plan_prefix":
- ["t2"],
+ "plan_prefix": "t2",
"table": "t1",
- "rows_for_plan": 1000,
- "cost_for_plan": 224.2416992,
+ "rows_for_plan": 100,
+ "cost_for_plan": 0.13622835,
"pruned_by_cost": true,
- "current_cost": 224.2416992,
- "best_cost": 36.0278306
+ "current_cost": 0.13622835,
+ "best_cost": 0.036368944
}
]
}
@@ -9903,7 +11497,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 0,
- "cost": 0.145,
+ "cost": 0.001340684,
"chosen": true
}
]
@@ -10005,7 +11599,8 @@ select count(*) from seq_1_to_10000000 {
"table": "seq_1_to_10000000",
"table_scan": {
"rows": 10000000,
- "cost": 10000000
+ "read_cost": 124.7880673,
+ "read_and_compare_cost": 444.7880673
}
}
]
@@ -10013,23 +11608,30 @@ select count(*) from seq_1_to_10000000 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "seq_1_to_10000000",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 10000000,
- "cost": 10000000,
+ "rows": 10000000,
+ "rows_after_filter": 10000000,
+ "rows_out": 10000000,
+ "cost": 444.7880673,
+ "index_only": true,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 10000000,
- "cost": 10000000,
+ "rows_read": 10000000,
+ "rows_out": 10000000,
+ "cost": 444.7880673,
"uses_join_buffering": false
}
}
@@ -10037,15 +11639,17 @@ select count(*) from seq_1_to_10000000 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "seq_1_to_10000000",
"rows_for_plan": 10000000,
- "cost_for_plan": 12000000
+ "cost_for_plan": 444.7880673
}
]
},
{
- "best_join_order": ["seq_1_to_10000000"]
+ "best_join_order": ["seq_1_to_10000000"],
+ "rows": 10000000,
+ "cost": 444.7880673
},
{
"attaching_conditions_to_tables": {
@@ -10053,10 +11657,13 @@ select count(*) from seq_1_to_10000000 {
"attached_conditions_summary": [
{
"table": "seq_1_to_10000000",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -10084,7 +11691,7 @@ explain
select * from t1 left join (t2 join t3 on t3.pk=1000) on t2.a=t1.a and t2.pk is null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 const PRIMARY NULL NULL NULL 1 Impossible ON condition
-1 SIMPLE t2 const PRIMARY NULL NULL NULL 1 Impossible ON condition
+1 SIMPLE t2 const PRIMARY NULL NULL NULL 0 Impossible ON condition
1 SIMPLE t1 ALL NULL NULL NULL NULL 10
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.mark_join_nest_as_const'))
from information_schema.optimizer_trace;
@@ -10109,7 +11716,7 @@ set in_predicate_conversion_threshold=3;
explain select * from t0 where a in (1,2,3,4,5,6);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t0 ALL NULL NULL NULL NULL 10 Using where
-1 PRIMARY <derived3> ref key0 key0 4 test.t0.a 2 FirstMatch(t0)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t0.a 1
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
select json_detailed(json_extract(trace, '$**.in_to_subquery_conversion'))
from information_schema.optimizer_trace;
@@ -10211,8 +11818,6 @@ S
{
"access_type": "ref",
"index": "PRIMARY",
- "rows": 1.79769e308,
- "cost": 1.79769e308,
"chosen": false,
"cause": "no predicate for first keypart"
}
@@ -10374,7 +11979,7 @@ on t1.a=t.a
where t1.b < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 range idx_b idx_b 5 NULL 4 Using index condition; Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1
2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1
select
json_detailed(json_extract(trace, '$**.choose_best_splitting'))
@@ -10387,14 +11992,17 @@ json_detailed(json_extract(trace, '$**.choose_best_splitting'))
"considered_execution_plans":
[
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"get_costs_for_tables":
[
{
"best_access_path":
{
"table": "t2",
+ "plan_details":
+ {
+ "record_count": 1
+ },
"considered_access_paths":
[
{
@@ -10403,7 +12011,7 @@ json_detailed(json_extract(trace, '$**.choose_best_splitting'))
"used_range_estimates": false,
"reason": "not available",
"rows": 1.8367,
- "cost": 2.000585794,
+ "cost": 0.002051185,
"chosen": true
},
{
@@ -10415,8 +12023,9 @@ json_detailed(json_extract(trace, '$**.choose_best_splitting'))
"chosen_access_method":
{
"type": "ref",
- "records": 1.8367,
- "cost": 2.000585794,
+ "rows_read": 1.8367,
+ "rows_out": 1.8367,
+ "cost": 0.002051185,
"uses_join_buffering": false
}
}
@@ -10424,23 +12033,28 @@ json_detailed(json_extract(trace, '$**.choose_best_splitting'))
]
},
{
- "plan_prefix":
- [],
+ "plan_prefix": "",
"table": "t2",
"rows_for_plan": 1.8367,
- "cost_for_plan": 2.367925794,
- "cost_for_sorting": 1.8367
+ "cost_for_plan": 0.002051185,
+ "cost_for_sorting": 0.001155201
}
]
},
{
- "best_splitting":
+ "split_materialized":
{
"table": "t2",
"key": "idx_a",
- "record_count": 4,
- "cost": 2.488945919,
- "unsplit_cost": 25.72361682
+ "org_cost": 0.002051185,
+ "postjoin_cost": 0.001135418,
+ "one_splitting_cost": 0.003186603,
+ "unsplit_postjoin_cost": 0.036032575,
+ "unsplit_cost": 0.060625425,
+ "rows": 1.8367,
+ "outer_rows": 4,
+ "total_splitting_cost": 0.012746412,
+ "chosen": true
}
}
]
@@ -10450,13 +12064,7 @@ json_detailed(json_extract(trace, '$**.lateral_derived'))
from
information_schema.optimizer_trace;
json_detailed(json_extract(trace, '$**.lateral_derived'))
-[
- {
- "startup_cost": 9.955783677,
- "splitting_cost": 2.488945919,
- "records": 1
- }
-]
+NULL
drop table t1,t2;
#
# Test table functions.
@@ -10503,3 +12111,925 @@ left(trace, 100)
set optimizer_trace='enabled=off';
# End of 10.6 tests
+#
+# Testing of records_out
+#
+set @save_optimizer_switch= @@optimizer_switch;
+set @save_use_stat_tables= @@use_stat_tables;
+set @save_histogram_size= @@histogram_size;
+set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+set optimizer_switch='rowid_filter=on';
+set use_stat_tables='preferably';
+set histogram_size=127;
+create table t1 (a int, b int, c int, key(a),key(b));
+insert into t1 select seq, seq*2, seq/10 from seq_1_to_1000;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+explain select * from t1 where a<10 and b between 10 and 50 and c < 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 9 (2%) Using index condition; Using where; Using rowid filter
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 where a<10 and b between 10 and 50 and c < 10 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.a AS a,t1.b AS b,t1.c AS c from t1 where t1.a < 10 and t1.b between 10 and 50 and t1.c < 10"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a < 10 and t1.b between 10 and 50 and t1.c < 10",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.a < 10 and t1.b between 10 and 50 and t1.c < 10"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.a < 10 and t1.b between 10 and 50 and t1.c < 10"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.a < 10 and t1.b between 10 and 50 and t1.c < 10"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 1000,
+ "cost": 0.1671618
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ },
+ {
+ "index": "b",
+ "usable": true,
+ "key_parts": ["b"]
+ }
+ ],
+ "setup_range_conditions": [],
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a",
+ "ranges": ["(NULL) < (a) < (10)"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 9,
+ "cost": 0.012445505,
+ "chosen": true
+ },
+ {
+ "index": "b",
+ "ranges": ["(10) <= (b) <= (50)"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 21,
+ "cost": 0.027251933,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no group by or distinct"
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "a",
+ "rows": 9,
+ "ranges": ["(NULL) < (a) < (10)"]
+ },
+ "rows_for_plan": 9,
+ "cost_for_plan": 0.012445505,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "table": "t1",
+ "rowid_filters": [
+ {
+ "key": "a",
+ "build_cost": 0.001846537,
+ "rows": 9
+ },
+ {
+ "key": "b",
+ "build_cost": 0.003322634,
+ "rows": 21
+ }
+ ]
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.009
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 0.021
+ }
+ ],
+ "selectivity_for_columns": [
+ {
+ "column_name": "c",
+ "ranges": ["NULL < c < 10"],
+ "selectivity_from_histogram": 0.094
+ }
+ ],
+ "cond_selectivity": 0.000017766
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": "",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
+ "considered_access_paths": [
+ {
+ "filter": {
+ "rowid_filter_index": "b",
+ "index_only_cost": 0.001605807,
+ "filter_startup_cost": 0.003322634,
+ "find_key_and_filter_lookup_cost": 6.695354e-4,
+ "filter_selectivity": 0.021,
+ "original_rows": 9,
+ "new_rows": 0.189,
+ "original_access_cost": 0.011607363,
+ "with_filter_access_cost": 0.002485375,
+ "original_found_rows_cost": 0.010001556,
+ "with_filter_found_rows_cost": 2.100327e-4,
+ "org_cost": 0.011895363,
+ "filter_cost": 0.005814057,
+ "filter_used": true
+ },
+ "access_type": "range",
+ "range_index": "a",
+ "rows": 9,
+ "rows_after_filter": 0.189,
+ "rows_out": 0.017766,
+ "cost": 0.005814057,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "range",
+ "rows_read": 0.189,
+ "rows_out": 0.017766,
+ "cost": 0.005814057,
+ "uses_join_buffering": false,
+ "rowid_filter_index": "b"
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "",
+ "table": "t1",
+ "rows_for_plan": 0.017766,
+ "cost_for_plan": 0.005814057,
+ "pushdown_cond_selectivity": 0.094,
+ "filtered": 0.1974,
+ "rows_out": 0.017766
+ }
+ ]
+ },
+ {
+ "best_join_order": ["t1"],
+ "rows": 0.017766,
+ "cost": 0.005814057
+ },
+ {
+ "table": "t1",
+ "range_analysis": {
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": false,
+ "cause": "not applicable"
+ },
+ {
+ "index": "b",
+ "usable": true,
+ "key_parts": ["b"]
+ }
+ ],
+ "setup_range_conditions": [],
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "b",
+ "ranges": ["(10) <= (b) <= (50)"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 21,
+ "cost": 0.004244354,
+ "chosen": true
+ }
+ ]
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "b",
+ "rows": 21,
+ "ranges": ["(10) <= (b) <= (50)"]
+ },
+ "rows_for_plan": 21,
+ "cost_for_plan": 0.004244354,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "substitute_best_equal": {
+ "condition": "WHERE",
+ "resulting_condition": "t1.a < 10 and t1.b between 10 and 50 and t1.c < 10"
+ }
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached_condition": "t1.a < 10 and t1.b between 10 and 50 and t1.c < 10"
+ }
+ ]
+ }
+ },
+ {
+ "make_join_readinfo": [
+ {
+ "table": "t1",
+ "index_condition": "t1.a < 10",
+ "row_condition": "t1.b between 10 and 50 and t1.c < 10"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1;
+create table three (a int);
+insert into three values (1),(2),(3);
+create table t1 (a int, b int, c int, key(a),key(b));
+insert into t1 select mod(seq,10), seq, seq from seq_1_to_10000;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+set optimizer_use_condition_selectivity=2;
+explain format=json select * from three, t1 where t1.a=three.a and t1.b<5000 and t1.c<1000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": 2.662022424,
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "three",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 3,
+ "cost": 0.010504815,
+ "filtered": 100,
+ "attached_condition": "three.a is not null"
+ }
+ },
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["a", "b"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.three.a"],
+ "rowid_filter": {
+ "range": {
+ "key": "b",
+ "used_key_parts": ["b"]
+ },
+ "rows": 4312,
+ "selectivity_pct": 43.12
+ },
+ "loops": 3,
+ "rows": 1000,
+ "cost": 2.651517609,
+ "filtered": 43.11999893,
+ "attached_condition": "t1.b < 5000 and t1.c < 1000"
+ }
+ }
+ ]
+ }
+}
+set optimizer_use_condition_selectivity=4;
+explain format=json select * from three, t1 where t1.a=three.a and t1.b<5000 and t1.c<1000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": 1.712236739,
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "three",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 3,
+ "cost": 0.010504815,
+ "filtered": 100
+ }
+ },
+ {
+ "block-nl-join": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "possible_keys": ["a", "b"],
+ "loops": 3,
+ "rows": 10000,
+ "cost": 1.701731924,
+ "filtered": 3.230766058,
+ "attached_condition": "t1.b < 5000 and t1.c < 1000"
+ },
+ "buffer_type": "flat",
+ "buffer_size": "65",
+ "join_type": "BNL",
+ "attached_condition": "t1.a = three.a"
+ }
+ }
+ ]
+ }
+}
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain format=json select * from three, t1 where t1.a=three.a and t1.b<5000 and t1.c<1000 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select three.a AS a,t1.a AS a,t1.b AS b,t1.c AS c from three join t1 where t1.a = three.a and t1.b < 5000 and t1.c < 1000"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = three.a and t1.b < 5000 and t1.c < 1000",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.b < 5000 and t1.c < 1000 and multiple equal(t1.a, three.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.b < 5000 and t1.c < 1000 and multiple equal(t1.a, three.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.b < 5000 and t1.c < 1000 and multiple equal(t1.a, three.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "three",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t1",
+ "index": "a",
+ "field": "a",
+ "equals": "three.a",
+ "null_rejecting": true
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "three",
+ "table_scan": {
+ "rows": 3,
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
+ }
+ },
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 10000,
+ "cost": 1.581538
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": false,
+ "cause": "not applicable"
+ },
+ {
+ "index": "b",
+ "usable": true,
+ "key_parts": ["b"]
+ }
+ ],
+ "setup_range_conditions": [],
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "b",
+ "ranges": ["(NULL) < (b) < (5000)"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 4312,
+ "cost": 5.325149412,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "group_index_range": {
+ "chosen": false,
+ "cause": "not single_table"
+ }
+ }
+ },
+ {
+ "table": "t1",
+ "rowid_filters": [
+ {
+ "key": "b",
+ "build_cost": 0.611957109,
+ "rows": 4312
+ }
+ ]
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "b",
+ "selectivity_from_index": 0.4312
+ }
+ ],
+ "selectivity_for_columns": [
+ {
+ "column_name": "c",
+ "ranges": ["NULL < c < 1000"],
+ "selectivity_from_histogram": 0.0999
+ }
+ ],
+ "cond_selectivity": 0.04307688
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": "",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "three",
+ "plan_details": {
+ "record_count": 1
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "uses_join_buffering": false
+ }
+ }
+ },
+ {
+ "best_access_path": {
+ "table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "rows": 10000,
+ "rows_after_filter": 430.7688,
+ "rows_out": 430.7688,
+ "cost": 1.581538,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 430.7688,
+ "rows_out": 430.7688,
+ "cost": 1.581538,
+ "uses_join_buffering": false
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "",
+ "table": "three",
+ "rows_for_plan": 3,
+ "cost_for_plan": 0.010504815,
+ "rest_of_plan": [
+ {
+ "plan_prefix": "three",
+ "get_costs_for_tables": [
+ {
+ "best_access_path": {
+ "table": "t1",
+ "plan_details": {
+ "record_count": 3
+ },
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": false,
+ "reason": "not available",
+ "filter": {
+ "rowid_filter_index": "b",
+ "index_only_cost": 0.092096742,
+ "filter_startup_cost": 0.611957109,
+ "find_key_and_filter_lookup_cost": 0.094772697,
+ "filter_selectivity": 0.4312,
+ "original_rows": 1000,
+ "new_rows": 431.2,
+ "original_access_cost": 1.203380742,
+ "with_filter_access_cost": 0.6660551,
+ "original_found_rows_cost": 1.111284,
+ "with_filter_found_rows_cost": 0.479185661,
+ "org_cost": 3.706142226,
+ "filter_cost": 2.651517609,
+ "filter_used": true
+ },
+ "rows": 431.2,
+ "cost": 2.651517609,
+ "chosen": true
+ },
+ {
+ "access_type": "scan_with_join_cache",
+ "rows": 10000,
+ "rows_after_filter": 430.7688,
+ "rows_out": 323.0766,
+ "cost": 1.701731924,
+ "index_only": false,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method": {
+ "type": "scan",
+ "rows_read": 430.7688,
+ "rows_out": 323.0766,
+ "cost": 1.701731924,
+ "uses_join_buffering": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "three",
+ "table": "t1",
+ "rows_for_plan": 969.2298,
+ "cost_for_plan": 1.712236739,
+ "pushdown_cond_selectivity": 0.75,
+ "filtered": 3.230766,
+ "rows_out": 323.0766
+ }
+ ]
+ },
+ {
+ "plan_prefix": "",
+ "table": "t1",
+ "rows_for_plan": 430.7688,
+ "cost_for_plan": 1.581538,
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "best_join_order": ["three", "t1"],
+ "rows": 969.2298,
+ "cost": 1.712236739
+ },
+ {
+ "substitute_best_equal": {
+ "condition": "WHERE",
+ "resulting_condition": "t1.a = three.a and t1.b < 5000 and t1.c < 1000"
+ }
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "attached_conditions_computation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 10000,
+ "cost": 1.581538
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ },
+ {
+ "index": "b",
+ "usable": true,
+ "key_parts": ["b"]
+ }
+ ],
+ "setup_range_conditions": [],
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a"
+ },
+ {
+ "index": "b",
+ "ranges": ["(NULL) < (b) < (5000)"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 4312,
+ "cost": 5.325149412,
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "group_index_range": {
+ "chosen": false,
+ "cause": "not single_table"
+ }
+ }
+ }
+ ],
+ "attached_conditions_summary": [
+ {
+ "table": "three",
+ "attached_condition": null
+ },
+ {
+ "table": "t1",
+ "attached_condition": "t1.a = three.a and t1.b < 5000 and t1.c < 1000"
+ }
+ ]
+ }
+ },
+ {
+ "make_join_readinfo": []
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table three, t1;
+#
+# MDEV-21095: Index condition push down is not reflected in optimizer trace
+#
+create table t10 (a int, b int, c int, key(a,b));
+insert into t10 select seq, seq, seq from seq_1_to_10000;
+explain format=json select * from t10 where a<3 and b!=5 and c<10;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": 0.003808422,
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t10",
+ "access_type": "range",
+ "possible_keys": ["a"],
+ "key": "a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "loops": 1,
+ "rows": 2,
+ "cost": 0.003808422,
+ "filtered": 100,
+ "index_condition": "t10.a < 3 and t10.b <> 5",
+ "attached_condition": "t10.c < 10"
+ }
+ }
+ ]
+ }
+}
+set optimizer_trace='enabled=on';
+select * from t10 where a<3 and b!=5 and c<10;
+a b c
+1 1 1
+2 2 2
+select json_detailed(json_extract(trace, '$**.attaching_conditions_to_tables')) as out1
+from information_schema.optimizer_trace;
+out1
+[
+ {
+ "attached_conditions_computation":
+ [],
+ "attached_conditions_summary":
+ [
+ {
+ "table": "t10",
+ "attached_condition": "t10.a < 3 and t10.b <> 5 and t10.c < 10"
+ }
+ ]
+ }
+]
+drop table t10;
+#
+# MDEV-21092: EXISTS to IN is not reflected in the optimizer trace
+#
+set optimizer_trace='enabled=on';
+create table t1 (cn_c int, cn_n char(10), cn_a int );
+create table t2 (ci_p int, ci_c int );
+create table t3 (ci_p int, ci_c int );
+SELECT cn_n FROM t1 WHERE (EXISTS (select 1 from t2 where ci_p > 100000 and cn_c = ci_c)
+OR (cn_n LIKE 'L%') )
+AND cn_a > 1000000;
+cn_n
+select
+json_detailed(
+json_extract(trace, '$.steps[*].join_optimization[0].steps[0].transformation')
+) as out1
+from information_schema.optimizer_trace;
+out1
+[
+ {
+ "select_id": 2,
+ "from": "EXISTS (SELECT)",
+ "to": "IN (SELECT)",
+ "upper_not": false
+ }
+]
+drop table t1, t2, t3;
+#
+# MDEV-29997 Partition Pruning not included in optimizer tracing
+#
+create table t2 (a int, b int) partition by hash(a) partitions 10;
+create table t3 (a int, b int) partition by hash(a) partitions 10;
+INSERT INTO t2 SELECT seq, seq from seq_1_to_10;
+INSERT INTO t3 SELECT seq, seq from seq_1_to_10;
+set optimizer_trace='enabled=on';
+explain partitions select * from t2,t3 where t2.a in (2,3,4) and t3.a in (4,5);
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 p4,p5 ALL NULL NULL NULL NULL 2 Using where
+1 SIMPLE t2 p2,p3,p4 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
+select json_detailed(json_extract(trace, '$**.prune_partitions')) as out1
+from information_schema.optimizer_trace;
+out1
+[
+ {
+ "table": "t2",
+ "used_partitions": "p2,p3,p4"
+ },
+ {
+ "table": "t3",
+ "used_partitions": "p4,p5"
+ }
+]
+drop table t2,t3;
+create table t1 (
+a int
+) partition by range (a)
+( partition p0 values less than(10),
+partition p1 values less than (20),
+partition p2 values less than (25)
+);
+insert into t1 values (5),(15),(22);
+explain select * from t1 where a = 28;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+select json_detailed(json_extract(trace, '$**.prune_partitions')) as out1
+from information_schema.optimizer_trace;
+out1
+[
+ {
+ "table": "t1",
+ "used_partitions": ""
+ }
+]
+drop table t1;
+set @@optimizer_switch= @save_optimizer_switch;
+set @@use_stat_tables= @save_use_stat_tables;
+set @@histogram_size= @save_histogram_size;
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index d07afb2dfce..6f8040f1689 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -85,23 +85,7 @@ drop table t1,t2,t0;
--echo # group_by min max optimization
--echo #
CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
---disable_query_log
-INSERT INTO t1(a) VALUES (1), (2), (3), (4);
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
-INSERT INTO t1(a) SELECT a FROM t1;
---enable_query_log
+insert into t1 select seq, mod(seq,4)+1 from seq_1_to_65536;
analyze table t1;
EXPLAIN SELECT DISTINCT a FROM t1;
@@ -115,6 +99,7 @@ CREATE TABLE t1 (a INT, b INT, c int, d int, KEY(a,b,c,d));
INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3), (4,4,4,4), (1,0,1,1), (3,2,3,3), (4,5,4,4);
ANALYZE TABLE t1;
EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a;
+set statement optimizer_scan_setup_cost=0 for EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a;
select * from information_schema.OPTIMIZER_TRACE;
DROP TABLE t1;
@@ -138,30 +123,27 @@ drop table t1;
--echo # Late ORDER BY optimization
--echo #
-create table ten(a int);
-insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table one_k(a int primary key);
-insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
create table t1 (
pk int not null,
a int,
b int,
c int,
filler char(100),
- KEY a_a(c),
+ KEY c(c),
KEY a_c(a,c),
KEY a_b(a,b)
);
-insert into t1
-select a, a,a,a, 'filler-dataaa' from test.one_k;
+insert into t1 select seq, seq,seq,seq, 'filler-dataaa' from seq_0_to_999;
update t1 set a=1 where pk between 0 and 180;
update t1 set b=2 where pk between 0 and 20;
analyze table t1;
+explain select * from t1 where a=1 and b=2 order by c limit 1;
+update t1 set b=2 where pk between 20 and 40;
set optimizer_trace='enabled=on';
explain select * from t1 where a=1 and b=2 order by c limit 1;
select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
-drop table t1,ten,one_k;
+drop table t1;
--echo #
--echo # TABLE ELIMINATION
@@ -201,34 +183,23 @@ drop table t0, t1, t2, t3;
--echo # IN subquery to sem-join is traced
--echo #
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-
create table t1(a int, b int);
-insert into t1 values (0,0),(1,1),(2,2);
-create table t2 as select * from t1;
+insert into t1 select seq,seq from seq_0_to_3;
-create table t11(a int, b int);
-
-create table t10 (pk int, a int);
-insert into t10 select a,a from t0;
-create table t12 like t10;
-insert into t12 select * from t10;
-
-analyze table t1,t10;
+create table t2 (p int, a int);
+insert into t2 select seq,seq from seq_1_to_10;
set optimizer_trace='enabled=on';
-explain extended select * from t1 where a in (select pk from t10);
+explain extended select * from t1 where a in (select p from t2);
+insert into t2 select seq,seq from seq_10_to_100;
+explain extended select * from t1 where a in (select p from t2);
select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
-drop table t0,t1,t11,t10,t12,t2;
+drop table t1,t2;
--echo #
--echo # Selectivities for columns and indexes.
--echo #
-create table t0 (a int);
-insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-
create table t1 (
pk int,
a int,
@@ -236,7 +207,7 @@ b int,
key pk(pk),
key pk_a(pk,a),
key pk_a_b(pk,a,b));
-insert into t1 select a,a,a from t0;
+insert into t1 select seq,seq,seq from seq_0_to_9;
ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a,b) INDEXES ();
set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
@@ -248,7 +219,7 @@ explain select * from t1 where pk = 2 and a=5 and b=1;
select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set @@use_stat_tables= @save_use_stat_tables;
-drop table t0,t1;
+drop table t1;
set optimizer_trace="enabled=off";
--echo #
@@ -928,3 +899,106 @@ set optimizer_trace='enabled=off';
--echo # End of 10.6 tests
+
+--echo #
+--echo # Testing of records_out
+--echo #
+
+set @save_optimizer_switch= @@optimizer_switch;
+set @save_use_stat_tables= @@use_stat_tables;
+set @save_histogram_size= @@histogram_size;
+set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
+set optimizer_switch='rowid_filter=on';
+set use_stat_tables='preferably';
+set histogram_size=127;
+create table t1 (a int, b int, c int, key(a),key(b));
+insert into t1 select seq, seq*2, seq/10 from seq_1_to_1000;
+analyze table t1;
+--optimizer_trace
+explain select * from t1 where a<10 and b between 10 and 50 and c < 10;
+drop table t1;
+
+create table three (a int);
+insert into three values (1),(2),(3);
+create table t1 (a int, b int, c int, key(a),key(b));
+insert into t1 select mod(seq,10), seq, seq from seq_1_to_10000;
+analyze table t1;
+
+set optimizer_use_condition_selectivity=2;
+explain format=json select * from three, t1 where t1.a=three.a and t1.b<5000 and t1.c<1000;
+set optimizer_use_condition_selectivity=4;
+--optimizer_trace
+explain format=json select * from three, t1 where t1.a=three.a and t1.b<5000 and t1.c<1000;
+
+drop table three, t1;
+
+--echo #
+--echo # MDEV-21095: Index condition push down is not reflected in optimizer trace
+--echo #
+create table t10 (a int, b int, c int, key(a,b));
+insert into t10 select seq, seq, seq from seq_1_to_10000;
+explain format=json select * from t10 where a<3 and b!=5 and c<10;
+set optimizer_trace='enabled=on';
+select * from t10 where a<3 and b!=5 and c<10;
+select json_detailed(json_extract(trace, '$**.attaching_conditions_to_tables')) as out1
+from information_schema.optimizer_trace;
+drop table t10;
+
+--echo #
+--echo # MDEV-21092: EXISTS to IN is not reflected in the optimizer trace
+--echo #
+# EXISTS-to-IN conversion is traced on PREPARE, so it won't be visible with a VIEW:
+--disable_view_protocol
+set optimizer_trace='enabled=on';
+
+create table t1 (cn_c int, cn_n char(10), cn_a int );
+create table t2 (ci_p int, ci_c int );
+create table t3 (ci_p int, ci_c int );
+
+SELECT cn_n FROM t1 WHERE (EXISTS (select 1 from t2 where ci_p > 100000 and cn_c = ci_c)
+ OR (cn_n LIKE 'L%') )
+ AND cn_a > 1000000;
+
+select
+ json_detailed(
+ json_extract(trace, '$.steps[*].join_optimization[0].steps[0].transformation')
+ ) as out1
+from information_schema.optimizer_trace;
+
+--enable_view_protocol
+drop table t1, t2, t3;
+
+--echo #
+--echo # MDEV-29997 Partition Pruning not included in optimizer tracing
+--echo #
+--source include/have_partition.inc
+create table t2 (a int, b int) partition by hash(a) partitions 10;
+create table t3 (a int, b int) partition by hash(a) partitions 10;
+INSERT INTO t2 SELECT seq, seq from seq_1_to_10;
+INSERT INTO t3 SELECT seq, seq from seq_1_to_10;
+
+set optimizer_trace='enabled=on';
+explain partitions select * from t2,t3 where t2.a in (2,3,4) and t3.a in (4,5);
+select json_detailed(json_extract(trace, '$**.prune_partitions')) as out1
+from information_schema.optimizer_trace;
+drop table t2,t3;
+
+create table t1 (
+ a int
+) partition by range (a)
+( partition p0 values less than(10),
+ partition p1 values less than (20),
+ partition p2 values less than (25)
+);
+insert into t1 values (5),(15),(22);
+
+explain select * from t1 where a = 28;
+select json_detailed(json_extract(trace, '$**.prune_partitions')) as out1
+from information_schema.optimizer_trace;
+drop table t1;
+
+set @@optimizer_switch= @save_optimizer_switch;
+set @@use_stat_tables= @save_use_stat_tables;
+set @@histogram_size= @save_histogram_size;
+set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+
diff --git a/mysql-test/main/opt_trace_index_merge.result b/mysql-test/main/opt_trace_index_merge.result
index 0ffa930e2b0..994e6e88452 100644
--- a/mysql-test/main/opt_trace_index_merge.result
+++ b/mysql-test/main/opt_trace_index_merge.result
@@ -73,7 +73,7 @@ explain select * from t1 where a=1 or b=1 {
"range_analysis": {
"table_scan": {
"rows": 1000,
- "cost": 231.5878906
+ "cost": 0.1729314
},
"potential_range_indexes": [
{
@@ -98,7 +98,9 @@ explain select * from t1 where a=1 or b=1 {
"analyzing_roworder_intersect": {
"cause": "too few roworder scans"
},
- "analyzing_sort_intersect": {},
+ "analyzing_sort_intersect": {
+ "cutoff_cost": 0.1729314
+ },
"analyzing_index_merge_union": [
{
"indexes_to_merge": [
@@ -111,12 +113,12 @@ explain select * from t1 where a=1 or b=1 {
"using_mrr": false,
"index_only": true,
"rows": 1,
- "cost": 0.345585794,
+ "cost": 0.001478954,
"chosen": true
}
],
"index_to_merge": "a",
- "cumulated_cost": 0.345585794
+ "cumulated_cost": 0.001478954
},
{
"range_scan_alternatives": [
@@ -127,15 +129,15 @@ explain select * from t1 where a=1 or b=1 {
"using_mrr": false,
"index_only": true,
"rows": 1,
- "cost": 0.345585794,
+ "cost": 0.001478954,
"chosen": true
}
],
"index_to_merge": "b",
- "cumulated_cost": 0.691171589
+ "cumulated_cost": 0.002957908
}
],
- "cost_of_reading_ranges": 0.691171589,
+ "cost_of_reading_ranges": 0.002957908,
"use_roworder_union": true,
"cause": "always cheaper than non roworder retrieval",
"analyzing_roworder_scans": [
@@ -158,7 +160,7 @@ explain select * from t1 where a=1 or b=1 {
}
}
],
- "index_roworder_union_cost": 2.484903732,
+ "index_roworder_union_cost": 0.005185782,
"members": 2,
"chosen": true
}
@@ -187,13 +189,17 @@ explain select * from t1 where a=1 or b=1 {
]
},
"rows_for_plan": 2,
- "cost_for_plan": 2.484903732,
+ "cost_for_plan": 0.005185782,
"chosen": true
}
}
},
{
- "selectivity_for_indexes": [],
+ "selectivity_for_indexes": [
+ {
+ "use_opt_range_condition_rows_selectivity": 0.002
+ }
+ ],
"selectivity_for_columns": [],
"cond_selectivity": 0.002
}
@@ -202,23 +208,29 @@ explain select * from t1 where a=1 or b=1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "index_merge",
- "resulting_rows": 2,
- "cost": 2.484903732,
+ "rows": 2,
+ "rows_after_filter": 2,
+ "rows_out": 2,
+ "cost": 0.005185782,
"chosen": true
}
],
"chosen_access_method": {
"type": "index_merge",
- "records": 2,
- "cost": 2.484903732,
+ "rows_read": 2,
+ "rows_out": 2,
+ "cost": 0.005185782,
"uses_join_buffering": false
}
}
@@ -226,15 +238,17 @@ explain select * from t1 where a=1 or b=1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 2,
- "cost_for_plan": 2.884903732
+ "cost_for_plan": 0.005185782
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 2,
+ "cost": 0.005185782
},
{
"substitute_best_equal": {
@@ -248,10 +262,13 @@ explain select * from t1 where a=1 or b=1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.a = 1 or t1.b = 1"
+ "attached_condition": "t1.a = 1 or t1.b = 1"
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -315,7 +332,7 @@ set optimizer_trace='enabled=on';
# 3-way ROR-intersection
explain select key1,key2,key3 from t1 where key1=100 and key2=100 and key3=100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3 key1,key2,key3 5,5,5 NULL 2 Using intersect(key1,key2,key3); Using where; Using index
+1 SIMPLE t1 index_merge key1,key2,key3 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where
select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
@@ -330,7 +347,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 2243,
- "cost": 2700.058937,
+ "cost": 2.770351251,
"chosen": true
},
{
@@ -341,7 +358,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 2243,
- "cost": 2700.058937,
+ "cost": 2.770351251,
"chosen": false,
"cause": "cost"
},
@@ -353,7 +370,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 2243,
- "cost": 2700.058937,
+ "cost": 2.770351251,
"chosen": false,
"cause": "cost"
}
@@ -364,10 +381,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"index": "key1",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 10.31393703,
- "disk_sweep_cost": 1923.144061,
- "cumulative_total_cost": 1933.457998,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.240986767,
+ "disk_sweep_cost": 2.564386012,
+ "cumulative_total_cost": 2.805372779,
"usable": true,
"matching_rows_now": 2243,
"intersect_covering_with_this_index": false,
@@ -375,10 +392,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
},
{
"index": "key2",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 20.62787405,
- "disk_sweep_cost": 84.51771758,
- "cumulative_total_cost": 105.1455916,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.481973534,
+ "disk_sweep_cost": 0.089164506,
+ "cumulative_total_cost": 0.57113804,
"usable": true,
"matching_rows_now": 77.6360508,
"intersect_covering_with_this_index": false,
@@ -386,14 +403,15 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
},
{
"index": "key3",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 30.94181108,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.722960301,
"disk_sweep_cost": 0,
- "cumulative_total_cost": 30.94181108,
+ "cumulative_total_cost": 0.722960301,
"usable": true,
"matching_rows_now": 2.687185191,
"intersect_covering_with_this_index": true,
- "chosen": true
+ "chosen": false,
+ "cause": "does not reduce cost"
}
],
"clustered_pk":
@@ -401,9 +419,9 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"clustered_pk_added_to_intersect": false,
"cause": "no clustered pk index"
},
- "rows": 2,
- "cost": 30.94181108,
- "covering": true,
+ "rows": 77,
+ "cost": 0.573622393,
+ "covering": false,
"chosen": true
},
"analyzing_index_merge_union":
@@ -417,9 +435,9 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
"range_access_plan":
{
"type": "index_roworder_intersect",
- "rows": 2,
- "cost": 30.94181108,
- "covering": true,
+ "rows": 77,
+ "cost": 0.573622393,
+ "covering": false,
"clustered_pk_scan": false,
"intersect_of":
[
@@ -436,18 +454,11 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
"rows": 2243,
"ranges":
["(100) <= (key2) <= (100)"]
- },
- {
- "type": "range_scan",
- "index": "key3",
- "rows": 2243,
- "ranges":
- ["(100) <= (key3) <= (100)"]
}
]
},
- "rows_for_plan": 2,
- "cost_for_plan": 30.94181108,
+ "rows_for_plan": 77,
+ "cost_for_plan": 0.573622393,
"chosen": true
}
]
@@ -481,7 +492,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 2243,
- "cost": 457.058937,
+ "cost": 0.312922694,
"chosen": true
},
{
@@ -492,13 +503,13 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 2243,
- "cost": 457.058937,
+ "cost": 0.312922694,
"chosen": false,
"cause": "cost"
}
],
"index_to_merge": "key1",
- "cumulated_cost": 457.058937
+ "cumulated_cost": 0.312922694
},
{
"range_scan_alternatives":
@@ -511,7 +522,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 2243,
- "cost": 457.058937,
+ "cost": 0.312922694,
"chosen": true
},
{
@@ -522,16 +533,16 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": true,
"rows": 2243,
- "cost": 457.058937,
+ "cost": 0.312922694,
"chosen": false,
"cause": "cost"
}
],
"index_to_merge": "key3",
- "cumulated_cost": 914.1178741
+ "cumulated_cost": 0.625845388
}
],
- "cost_of_reading_ranges": 914.1178741,
+ "cost_of_reading_ranges": 0.625845388,
"use_roworder_union": true,
"cause": "always cheaper than non roworder retrieval",
"analyzing_roworder_scans":
@@ -548,10 +559,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"index": "key1",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 10.31393703,
- "disk_sweep_cost": 1923.144061,
- "cumulative_total_cost": 1933.457998,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.240986767,
+ "disk_sweep_cost": 2.564386012,
+ "cumulative_total_cost": 2.805372779,
"usable": true,
"matching_rows_now": 2243,
"intersect_covering_with_this_index": false,
@@ -559,10 +570,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
},
{
"index": "key2",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 20.62787405,
- "disk_sweep_cost": 84.51771758,
- "cumulative_total_cost": 105.1455916,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.481973534,
+ "disk_sweep_cost": 0.089164506,
+ "cumulative_total_cost": 0.57113804,
"usable": true,
"matching_rows_now": 77.6360508,
"intersect_covering_with_this_index": false,
@@ -575,7 +586,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "no clustered pk index"
},
"rows": 77,
- "cost": 105.1455916,
+ "cost": 0.573622393,
"covering": false,
"chosen": true
}
@@ -592,10 +603,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
[
{
"index": "key3",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 10.31393703,
- "disk_sweep_cost": 1923.144061,
- "cumulative_total_cost": 1933.457998,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.240986767,
+ "disk_sweep_cost": 2.564386012,
+ "cumulative_total_cost": 2.805372779,
"usable": true,
"matching_rows_now": 2243,
"intersect_covering_with_this_index": false,
@@ -603,10 +614,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
},
{
"index": "key4",
- "index_scan_cost": 10.31393703,
- "cumulated_index_scan_cost": 20.62787405,
- "disk_sweep_cost": 84.51771758,
- "cumulative_total_cost": 105.1455916,
+ "index_scan_cost": 0.240986767,
+ "cumulated_index_scan_cost": 0.481973534,
+ "disk_sweep_cost": 0.089164506,
+ "cumulative_total_cost": 0.57113804,
"usable": true,
"matching_rows_now": 77.6360508,
"intersect_covering_with_this_index": false,
@@ -619,13 +630,13 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"cause": "no clustered pk index"
},
"rows": 77,
- "cost": 105.1455916,
+ "cost": 0.573622393,
"covering": false,
"chosen": true
}
}
],
- "index_roworder_union_cost": 194.9771115,
+ "index_roworder_union_cost": 1.135493366,
"members": 2,
"chosen": true
}
@@ -644,7 +655,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
{
"type": "index_roworder_intersect",
"rows": 77,
- "cost": 105.1455916,
+ "cost": 0.573622393,
"covering": false,
"clustered_pk_scan": false,
"intersect_of":
@@ -668,7 +679,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
{
"type": "index_roworder_intersect",
"rows": 77,
- "cost": 105.1455916,
+ "cost": 0.573622393,
"covering": false,
"clustered_pk_scan": false,
"intersect_of":
@@ -692,7 +703,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary'))
]
},
"rows_for_plan": 154,
- "cost_for_plan": 194.9771115,
+ "cost_for_plan": 1.135493366,
"chosen": true
}
]
diff --git a/mysql-test/main/opt_trace_index_merge_innodb.result b/mysql-test/main/opt_trace_index_merge_innodb.result
index adb9cd5d622..02509aa9610 100644
--- a/mysql-test/main/opt_trace_index_merge_innodb.result
+++ b/mysql-test/main/opt_trace_index_merge_innodb.result
@@ -89,7 +89,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"range_analysis": {
"table_scan": {
"rows": 1000,
- "cost": 206
+ "cost": 0.1764192
},
"potential_range_indexes": [
{
@@ -118,8 +118,9 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"using_mrr": false,
"index_only": false,
"rows": 1000,
- "cost": 204.27,
- "chosen": true
+ "cost": 0.19598856,
+ "chosen": false,
+ "cause": "cost"
},
{
"index": "key1",
@@ -128,7 +129,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"using_mrr": false,
"index_only": false,
"rows": 1,
- "cost": 1.345146475,
+ "cost": 0.00424968,
"chosen": true
}
],
@@ -136,10 +137,10 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"intersecting_indexes": [
{
"index": "key1",
- "index_scan_cost": 1.000146475,
- "cumulated_index_scan_cost": 1.000146475,
- "disk_sweep_cost": 1.004153686,
- "cumulative_total_cost": 2.004300162,
+ "index_scan_cost": 0.001661605,
+ "cumulated_index_scan_cost": 0.001661605,
+ "disk_sweep_cost": 0.00171364,
+ "cumulative_total_cost": 0.003375245,
"usable": true,
"matching_rows_now": 1,
"intersect_covering_with_this_index": false,
@@ -151,7 +152,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"cause": "cost"
},
"chosen": false,
- "cause": "cost"
+ "cause": "too few indexes to merge"
},
"analyzing_index_merge_union": []
},
@@ -167,7 +168,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"ranges": ["(1) <= (key1) <= (1)"]
},
"rows_for_plan": 1,
- "cost_for_plan": 1.345146475,
+ "cost_for_plan": 0.00424968,
"chosen": true
}
}
@@ -177,7 +178,7 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"rowid_filters": [
{
"key": "key1",
- "build_cost": 0.130146475,
+ "build_cost": 0.001763258,
"rows": 1
}
]
@@ -201,18 +202,21 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "ref",
"index": "key1",
"used_range_estimates": true,
"rows": 1,
- "cost": 1.125146475,
+ "cost": 0.00345856,
"chosen": true
},
{
@@ -223,8 +227,9 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
],
"chosen_access_method": {
"type": "ref",
- "records": 1,
- "cost": 1.125146475,
+ "rows_read": 1,
+ "rows_out": 1,
+ "cost": 0.00345856,
"uses_join_buffering": false
}
}
@@ -232,15 +237,17 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 1,
- "cost_for_plan": 1.325146475
+ "cost_for_plan": 0.00345856
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 1,
+ "cost": 0.00345856
},
{
"substitute_best_equal": {
@@ -254,10 +261,18 @@ explain select * from t1 where pk1 != 0 and key1 = 1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": "t1.pk1 <> 0"
+ "attached_condition": "t1.pk1 <> 0"
}
]
}
+ },
+ {
+ "make_join_readinfo": [
+ {
+ "table": "t1",
+ "index_condition": "t1.pk1 <> 0"
+ }
+ ]
}
]
}
diff --git a/mysql-test/main/opt_trace_security.result b/mysql-test/main/opt_trace_security.result
index 48ca5c5e36f..9ef6cadec5f 100644
--- a/mysql-test/main/opt_trace_security.result
+++ b/mysql-test/main/opt_trace_security.result
@@ -80,7 +80,8 @@ select * from db1.t1 {
"table": "t1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
}
]
@@ -88,23 +89,30 @@ select * from db1.t1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -112,15 +120,17 @@ select * from db1.t1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953
+ "cost_for_plan": 0.010504815
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 3,
+ "cost": 0.010504815
},
{
"attaching_conditions_to_tables": {
@@ -128,10 +138,13 @@ select * from db1.t1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
@@ -209,7 +222,8 @@ select * from db1.v1 {
"table": "t1",
"table_scan": {
"rows": 3,
- "cost": 2.005126953
+ "read_cost": 0.010408815,
+ "read_and_compare_cost": 0.010504815
}
}
]
@@ -217,23 +231,30 @@ select * from db1.v1 {
{
"considered_execution_plans": [
{
- "plan_prefix": [],
+ "plan_prefix": "",
"get_costs_for_tables": [
{
"best_access_path": {
"table": "t1",
+ "plan_details": {
+ "record_count": 1
+ },
"considered_access_paths": [
{
"access_type": "scan",
- "resulting_rows": 3,
- "cost": 2.005126953,
+ "rows": 3,
+ "rows_after_filter": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
+ "index_only": false,
"chosen": true
}
],
"chosen_access_method": {
"type": "scan",
- "records": 3,
- "cost": 2.005126953,
+ "rows_read": 3,
+ "rows_out": 3,
+ "cost": 0.010504815,
"uses_join_buffering": false
}
}
@@ -241,15 +262,17 @@ select * from db1.v1 {
]
},
{
- "plan_prefix": [],
+ "plan_prefix": "",
"table": "t1",
"rows_for_plan": 3,
- "cost_for_plan": 2.605126953
+ "cost_for_plan": 0.010504815
}
]
},
{
- "best_join_order": ["t1"]
+ "best_join_order": ["t1"],
+ "rows": 3,
+ "cost": 0.010504815
},
{
"attaching_conditions_to_tables": {
@@ -257,10 +280,13 @@ select * from db1.v1 {
"attached_conditions_summary": [
{
"table": "t1",
- "attached": null
+ "attached_condition": null
}
]
}
+ },
+ {
+ "make_join_readinfo": []
}
]
}
diff --git a/mysql-test/main/opt_trace_selectivity.result b/mysql-test/main/opt_trace_selectivity.result
new file mode 100644
index 00000000000..d6abad79637
--- /dev/null
+++ b/mysql-test/main/opt_trace_selectivity.result
@@ -0,0 +1,369 @@
+create or replace table t1 (a int, b int, c int, key(a,c), key(b,c), key (c,b)) engine=aria;
+insert into t1 select seq/100+1, mod(seq,10), mod(seq,15) from seq_1_to_10000;
+insert into t1 select seq/100+1, mod(seq,10), 10 from seq_1_to_1000;
+optimize table t1;
+Table Op Msg_type Msg_text
+test.t1 optimize status OK
+select count(*) from t1 where a=2;
+count(*)
+200
+select count(*) from t1 where b=5;
+count(*)
+1100
+select count(*) from t1 where c=5;
+count(*)
+667
+select count(*) from t1 where c=10;
+count(*)
+1667
+select count(*) from t1 where a=2 and b=5;
+count(*)
+20
+select count(*) from t1 where c=10 and b=5;
+count(*)
+433
+select count(*) from t1 where c=5 and b=5;
+count(*)
+334
+set optimizer_trace="enabled=on";
+select count(*) from t1 where a=2 and b=5 and c=10;
+count(*)
+14
+set @trace=(select trace from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
+select
+JSON_DETAILED(
+JSON_EXTRACT(
+JSON_EXTRACT(@trace, '$**.considered_execution_plans'),
+'$[0]'
+ )
+) as JS;
+JS
+[
+ {
+ "plan_prefix": "",
+ "get_costs_for_tables":
+ [
+ {
+ "best_access_path":
+ {
+ "table": "t1",
+ "plan_details":
+ {
+ "record_count": 1
+ },
+ "considered_access_paths":
+ [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": true,
+ "rows": 104,
+ "cost": 0.060988785,
+ "chosen": true
+ },
+ {
+ "access_type": "ref",
+ "index": "b",
+ "used_range_estimates": true,
+ "rows": 340,
+ "cost": 0.141618657,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "access_type": "ref",
+ "index": "c",
+ "used_range_estimates": true,
+ "rows": 632,
+ "cost": 0.241826241,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "access_type": "index_merge",
+ "rows": 7,
+ "rows_after_filter": 7,
+ "rows_out": 7,
+ "cost": 0.045367017,
+ "chosen": true
+ }
+ ],
+ "chosen_access_method":
+ {
+ "type": "index_merge",
+ "rows_read": 7,
+ "rows_out": 7,
+ "cost": 0.045367017,
+ "uses_join_buffering": false
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "",
+ "table": "t1",
+ "rows_for_plan": 7,
+ "cost_for_plan": 0.045367017
+ }
+]
+select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.selectivity_for_indexes')) as JS;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.009454545
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 0.1
+ },
+ {
+ "use_opt_range_condition_rows_selectivity": 6.363636e-4
+ }
+ ]
+]
+select count(*) from t1 where a=2 and b=5 and c=5;
+count(*)
+3
+set @trace=(select trace from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
+select
+JSON_DETAILED(
+JSON_EXTRACT(
+JSON_EXTRACT(@trace, '$**.considered_execution_plans'),
+'$[0]'
+ )
+) as JS;
+JS
+[
+ {
+ "plan_prefix": "",
+ "get_costs_for_tables":
+ [
+ {
+ "best_access_path":
+ {
+ "table": "t1",
+ "plan_details":
+ {
+ "record_count": 1
+ },
+ "considered_access_paths":
+ [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": true,
+ "rows": 6,
+ "cost": 0.005388489,
+ "chosen": true
+ },
+ {
+ "access_type": "ref",
+ "index": "b",
+ "used_range_estimates": true,
+ "rows": 232,
+ "cost": 0.104720241,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "access_type": "ref",
+ "index": "c",
+ "used_range_estimates": true,
+ "rows": 293,
+ "cost": 0.125561013,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ],
+ "chosen_access_method":
+ {
+ "type": "ref",
+ "rows_read": 6,
+ "rows_out": 0.6,
+ "cost": 0.005388489,
+ "uses_join_buffering": false
+ }
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": "",
+ "table": "t1",
+ "rows_for_plan": 0.6,
+ "cost_for_plan": 0.005388489,
+ "pushdown_cond_selectivity": 0.1,
+ "filtered": 10,
+ "rows_out": 0.6
+ }
+]
+select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.selectivity_for_indexes')) as JS;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 5.454545e-4
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 0.1
+ }
+ ]
+]
+# Ensure that we only use selectivity from a non-used index for simple cases
+select count(*) from t1 where (a=2 and b= 5);
+count(*)
+20
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.017545455
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 0.073181818
+ }
+ ]
+]
+# All of the following should have selectivity=1 for index 'b'
+select count(*) from t1 where (a=2 and b between 0 and 100);
+count(*)
+200
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.017545455
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+select count(*) from t1 where (a in (2,3) and b between 0 and 100);
+count(*)
+400
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.035090909
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+select count(*) from t1 where (a>2 and b between 0 and 100);
+count(*)
+10702
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.973909091
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+select count(*) from t1 where (a>=2 and b between 0 and 100);
+count(*)
+10902
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.991454545
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+select count(*) from t1 where (a<=2 and b between 0 and 100);
+count(*)
+298
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.026181818
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+select count(*) from t1 where (a<2 and b between 0 and 100);
+count(*)
+98
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.008636364
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+select count(*) from t1 where (a between 2 and 3 and b between 0 and 100);
+count(*)
+400
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+JS
+[
+ [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.035090909
+ },
+ {
+ "index_name": "b",
+ "selectivity_from_index": 1
+ }
+ ]
+]
+drop table t1;
+set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace_selectivity.test b/mysql-test/main/opt_trace_selectivity.test
new file mode 100644
index 00000000000..40f78d91db7
--- /dev/null
+++ b/mysql-test/main/opt_trace_selectivity.test
@@ -0,0 +1,86 @@
+--source include/have_sequence.inc
+--source include/not_embedded.inc
+
+#
+# Test changes in calculate_cond_selectivity_for_table()
+#
+create or replace table t1 (a int, b int, c int, key(a,c), key(b,c), key (c,b)) engine=aria;
+insert into t1 select seq/100+1, mod(seq,10), mod(seq,15) from seq_1_to_10000;
+insert into t1 select seq/100+1, mod(seq,10), 10 from seq_1_to_1000;
+optimize table t1;
+
+select count(*) from t1 where a=2;
+select count(*) from t1 where b=5;
+select count(*) from t1 where c=5;
+select count(*) from t1 where c=10;
+select count(*) from t1 where a=2 and b=5;
+select count(*) from t1 where c=10 and b=5;
+select count(*) from t1 where c=5 and b=5;
+
+set optimizer_trace="enabled=on";
+select count(*) from t1 where a=2 and b=5 and c=10;
+
+set @trace=(select trace from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
+
+# The second JSON_EXTRACT is for --view-protocol which wraps every select:
+select
+ JSON_DETAILED(
+ JSON_EXTRACT(
+ JSON_EXTRACT(@trace, '$**.considered_execution_plans'),
+ '$[0]'
+ )
+ ) as JS;
+
+select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.selectivity_for_indexes')) as JS;
+
+select count(*) from t1 where a=2 and b=5 and c=5;
+set @trace=(select trace from INFORMATION_SCHEMA.OPTIMIZER_TRACE);
+
+# The second JSON_EXTRACT is for --view-protocol which wraps every select:
+select
+ JSON_DETAILED(
+ JSON_EXTRACT(
+ JSON_EXTRACT(@trace, '$**.considered_execution_plans'),
+ '$[0]'
+ )
+ ) as JS;
+select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.selectivity_for_indexes')) as JS;
+
+--echo # Ensure that we only use selectivity from a non-used index for simple cases
+
+
+select count(*) from t1 where (a=2 and b= 5);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+--echo # All of the following should have selectivity=1 for index 'b'
+select count(*) from t1 where (a=2 and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+select count(*) from t1 where (a in (2,3) and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+select count(*) from t1 where (a>2 and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+select count(*) from t1 where (a>=2 and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+select count(*) from t1 where (a<=2 and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+select count(*) from t1 where (a<2 and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+select count(*) from t1 where (a between 2 and 3 and b between 0 and 100);
+select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_indexes')) as JS
+from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+drop table t1;
+set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace_ucs2.result b/mysql-test/main/opt_trace_ucs2.result
index 9e4f25f3150..e89750ec3b0 100644
--- a/mysql-test/main/opt_trace_ucs2.result
+++ b/mysql-test/main/opt_trace_ucs2.result
@@ -7,6 +7,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -16,7 +17,9 @@ EXPLAIN
"key": "col1",
"key_length": "21",
"used_key_parts": ["col1"],
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.col1 >= 'a'"
}
@@ -38,7 +41,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives'))
"using_mrr": false,
"index_only": false,
"rows": 2,
- "cost": 2.547733708,
+ "cost": 0.003808422,
"chosen": true
}
],
diff --git a/mysql-test/main/opt_trace_ucs2.test b/mysql-test/main/opt_trace_ucs2.test
index 1a79ea9780e..7e06955d666 100644
--- a/mysql-test/main/opt_trace_ucs2.test
+++ b/mysql-test/main/opt_trace_ucs2.test
@@ -5,6 +5,7 @@ create or replace table t1 (col1 char(10) character set ucs2, filler char(100),
insert into t1 values ('a', 'a');
insert into t1 values ('a', 'a');
set optimizer_trace=1;
+--source include/explain-no-costs.inc
explain format=json select * from t1 force index(col1) where col1 >='a';
#enable after fix MDEV-27871
--disable_view_protocol
diff --git a/mysql-test/main/opt_tvc.result b/mysql-test/main/opt_tvc.result
index eaf75ed7999..c08c68eb7ee 100644
--- a/mysql-test/main/opt_tvc.result
+++ b/mysql-test/main/opt_tvc.result
@@ -46,12 +46,11 @@ a b
2 5
explain extended select * from t1 where a in (1,2);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a`
explain extended select * from t1
where a in
(
@@ -59,12 +58,49 @@ select *
from (values (1),(2)) as tvc_0
);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
+select * from t1 where a in (1,2,2,2,3,4,5,6,7);
+a b
+1 2
+4 6
+1 1
+2 5
+7 8
+select * from t1
+where a in
+(
+select *
+from (values (1),(2),(2),(2),(2),(3),(4),(5),(6),(7)) as tvc_0
+);
+a b
+1 2
+4 6
+1 1
+2 5
+7 8
+explain extended select * from t1 where a in (1,2,2,2,3,4,5,6,7);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2),(2),(2),(3),(4),(5),(6),(7)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a`
+explain extended select * from t1
+where a in
+(
+select *
+from (values (1),(2),(2),(2),(2),(3),(4),(5),(6),(7)) as tvc_0
+);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2),(2),(2),(2),(3),(4),(5),(6),(7)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
# AND-condition with IN-predicates in WHERE-part
select * from t1
where a in (1,2) and
@@ -90,15 +126,13 @@ explain extended select * from t1
where a in (1,2) and
b in (1,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
-4 MATERIALIZED <derived5> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+1 PRIMARY <derived5> eq_ref distinct_key distinct_key 4 test.t1.b 1 100.00
5 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) semi join ((values (1),(5)) `tvc_1`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(5)) `tvc_1` join (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a` and `tvc_1`.`_col_1` = `test`.`t1`.`b`
explain extended select * from t1
where a in
(
@@ -111,15 +145,13 @@ select *
from (values (1),(5)) as tvc_1
);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
-4 MATERIALIZED <derived5> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+1 PRIMARY <derived5> eq_ref distinct_key distinct_key 4 test.t1.b 1 100.00
5 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) semi join ((values (1),(5)) `tvc_1`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(5)) `tvc_1` join (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a` and `tvc_1`.`1` = `test`.`t1`.`b`
# subquery with IN-predicate
select * from t1
where a in
@@ -150,11 +182,11 @@ from t2 where b in (3,4)
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join)
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 6 100.00 Using where
+2 MATERIALIZED <derived4> eq_ref distinct_key distinct_key 4 test.t2.b 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (3),(4)) `tvc_0` join `test`.`t2`) where `test`.`t2`.`b` = `tvc_0`.`_col_1`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (3),(4)) `tvc_0` join `test`.`t2`) where `tvc_0`.`_col_1` = `test`.`t2`.`b`
explain extended select * from t1
where a in
(
@@ -168,11 +200,11 @@ from (values (3),(4)) as tvc_0
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join)
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 6 100.00 Using where
+2 MATERIALIZED <derived4> eq_ref distinct_key distinct_key 4 test.t2.b 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (3),(4)) `tvc_0` join `test`.`t2`) where `test`.`t2`.`b` = `tvc_0`.`3`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (3),(4)) `tvc_0` join `test`.`t2`) where `tvc_0`.`3` = `test`.`t2`.`b`
# derived table with IN-predicate
select * from
(
@@ -206,12 +238,11 @@ from t1
where a in (1,2)
) as dr_table;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a`
explain extended select * from
(
select *
@@ -224,12 +255,11 @@ as tvc_0
)
) as dr_table;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
# non-recursive CTE with IN-predicate
with tvc_0 as
(
@@ -265,12 +295,11 @@ where a in (1,2)
)
select * from tvc_0;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 with tvc_0 as (/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where `test`.`t1`.`a` in (1,2))/* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 with tvc_0 as (/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (dual) join `test`.`t1` where `test`.`t1`.`a` in (1,2))/* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a`
explain extended select * from
(
select *
@@ -283,12 +312,11 @@ as tvc_0
)
) as dr_table;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
# VIEW with IN-predicate
create view v1 as
select *
@@ -316,20 +344,18 @@ a b
2 5
explain extended select * from v1;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a`
explain extended select * from v2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived4> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
drop view v1,v2;
# subselect defined by derived table with IN-predicate
select * from t1
@@ -382,11 +408,11 @@ as dr_table
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived5> ALL NULL NULL NULL NULL 2 100.00
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join)
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+2 MATERIALIZED <derived5> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
5 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0` join `test`.`t1`) where `test`.`t1`.`a` = 1 and `test`.`t1`.`a` = `tvc_0`.`_col_1`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0` join `test`.`t1`) where `test`.`t1`.`a` = 1 and `tvc_0`.`_col_1` = `test`.`t1`.`a`
explain extended select * from t1
where a in
(
@@ -407,11 +433,11 @@ as dr_table
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived5> ALL NULL NULL NULL NULL 2 100.00
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using join buffer (flat, BNL join)
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+2 MATERIALIZED <derived5> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
5 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0` join `test`.`t1`) where `test`.`t1`.`a` = 1 and `test`.`t1`.`a` = `tvc_0`.`1`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0` join `test`.`t1`) where `test`.`t1`.`a` = 1 and `tvc_0`.`1` = `test`.`t1`.`a`
# derived table with IN-predicate and group by
select * from
(
@@ -444,13 +470,12 @@ where b in (3,5)
group by b
) as dr_table;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 12 100.00
-2 DERIVED t1 ALL NULL NULL NULL NULL 6 100.00 Using temporary; Using filesort
-2 DERIVED <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 6 100.00
+2 DERIVED t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using temporary; Using filesort
+2 DERIVED <derived4> eq_ref distinct_key distinct_key 4 test.t1.b 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `dr_table`.`max(a)` AS `max(a)`,`dr_table`.`b` AS `b` from (/* select#2 */ select max(`test`.`t1`.`a`) AS `max(a)`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (3),(5)) `tvc_0`) where 1 group by `test`.`t1`.`b`) `dr_table`
+Note 1003 /* select#1 */ select `dr_table`.`max(a)` AS `max(a)`,`dr_table`.`b` AS `b` from (/* select#2 */ select max(`test`.`t1`.`a`) AS `max(a)`,`test`.`t1`.`b` AS `b` from (values (3),(5)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`b` group by `test`.`t1`.`b`) `dr_table`
explain extended select * from
(
select max(a),b
@@ -464,13 +489,12 @@ as tvc_0
group by b
) as dr_table;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 12 100.00
-2 DERIVED t1 ALL NULL NULL NULL NULL 6 100.00 Using temporary; Using filesort
-2 DERIVED <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived4> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 6 100.00
+2 DERIVED t1 ALL NULL NULL NULL NULL 6 100.00 Using where; Using temporary; Using filesort
+2 DERIVED <derived4> eq_ref distinct_key distinct_key 4 test.t1.b 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `dr_table`.`max(a)` AS `max(a)`,`dr_table`.`b` AS `b` from (/* select#2 */ select max(`test`.`t1`.`a`) AS `max(a)`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (3),(5)) `tvc_0`) where 1 group by `test`.`t1`.`b`) `dr_table`
+Note 1003 /* select#1 */ select `dr_table`.`max(a)` AS `max(a)`,`dr_table`.`b` AS `b` from (/* select#2 */ select max(`test`.`t1`.`a`) AS `max(a)`,`test`.`t1`.`b` AS `b` from (values (3),(5)) `tvc_0` join `test`.`t1` where `tvc_0`.`3` = `test`.`t1`.`b` group by `test`.`t1`.`b`) `dr_table`
# prepare statement
prepare stmt from "select * from t1 where a in (1,2)";
execute stmt;
@@ -506,12 +530,11 @@ a b
4 yq
explain extended select * from t3 where a in (1,4);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00
-1 PRIMARY t3 ref idx idx 5 tvc_0._col_1 3 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t3 ALL idx NULL NULL NULL 28 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t3.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t3` semi join ((values (1),(4)) `tvc_0`) where `test`.`t3`.`a` = `tvc_0`.`_col_1`
+Note 1003 /* select#1 */ select `test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from (values (1),(4)) `tvc_0` join `test`.`t3` where `tvc_0`.`_col_1` = `test`.`t3`.`a`
# use vectors in IN predeicate
set @@in_predicate_conversion_threshold= 4;
select * from t1 where (a,b) in ((1,2),(3,4));
@@ -519,14 +542,13 @@ a b
1 2
explain extended select * from t1 where (a,b) in ((1,2),(3,4));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.b 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1,2),(3,4)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1,2),(3,4)) `tvc_0` join `test`.`t1` where `tvc_0`.`_col_1` = `test`.`t1`.`a` and `tvc_0`.`_col_2` = `test`.`t1`.`b`
set @@in_predicate_conversion_threshold= 2;
-# trasformation works for the one IN predicate and doesn't work for the other
+# transformation works for the one IN predicate and doesn't work for the other
set @@in_predicate_conversion_threshold= 5;
select * from t2
where (a,b) in ((1,2),(8,9)) and
@@ -539,11 +561,10 @@ where (a,b) in ((1,2),(8,9)) and
(a,c) in ((1,3),(8,0),(5,1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 6 100.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 8 test.t2.a,test.t2.c 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` semi join ((values (1,3),(8,0),(5,1)) `tvc_0`) where (`test`.`t2`.`a`,`test`.`t2`.`b`) in (<cache>((1,2)),<cache>((8,9)))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from (values (1,3),(8,0),(5,1)) `tvc_0` join `test`.`t2` where `tvc_0`.`_col_1` = `test`.`t2`.`a` and `tvc_0`.`_col_2` = `test`.`t2`.`c` and (`test`.`t2`.`a`,`test`.`t2`.`b`) in (<cache>((1,2)),<cache>((8,9)))
set @@in_predicate_conversion_threshold= 2;
#
# mdev-14281: conversion of NOT IN predicate into subquery predicate
@@ -568,18 +589,18 @@ explain extended select * from t1
where (a,b) not in ((1,2),(8,9), (5,1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 8 func,func 2 100.00 Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 8 func,func 1 100.00 Using where; Full scan on NULL key
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`,`test`.`t1`.`b`>(<in_optimizer>((`test`.`t1`.`a`,`test`.`t1`.`b`),<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in (temporary) on key0 where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`_col_1`) and trigcond(<cache>(`test`.`t1`.`b`) = `tvc_0`.`_col_2`)))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`,`test`.`t1`.`b`>(<in_optimizer>((`test`.`t1`.`a`,`test`.`t1`.`b`),<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`) in <temporary table> on distinct_key where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`_col_1`) and trigcond(<cache>(`test`.`t1`.`b`) = `tvc_0`.`_col_2`)))))
explain extended select * from t1
where (a,b) not in (select * from (values (1,2),(8,9), (5,1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 8 func,func 2 100.00 Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 8 func,func 1 100.00 Using where; Full scan on NULL key
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`,`test`.`t1`.`b`>(<in_optimizer>((`test`.`t1`.`a`,`test`.`t1`.`b`),<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in (temporary) on key0 where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`1`) and trigcond(<cache>(`test`.`t1`.`b`) = `tvc_0`.`2`)))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`,`test`.`t1`.`b`>(<in_optimizer>((`test`.`t1`.`a`,`test`.`t1`.`b`),<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`) in <temporary table> on distinct_key where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`1`) and trigcond(<cache>(`test`.`t1`.`b`) = `tvc_0`.`2`)))))
select * from t1
where b < 7 and (a,b) not in ((1,2),(8,9), (5,1));
a b
@@ -590,10 +611,10 @@ explain extended select * from t1
where b < 7 and (a,b) not in ((1,2),(8,9), (5,1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 8 func,func 2 100.00 Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 8 func,func 1 100.00 Using where; Full scan on NULL key
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` < 7 and !<expr_cache><`test`.`t1`.`a`,`test`.`t1`.`b`>(<in_optimizer>((`test`.`t1`.`a`,`test`.`t1`.`b`),<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in (temporary) on key0 where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`_col_1`) and trigcond(<cache>(`test`.`t1`.`b`) = `tvc_0`.`_col_2`)))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` < 7 and !<expr_cache><`test`.`t1`.`a`,`test`.`t1`.`b`>(<in_optimizer>((`test`.`t1`.`a`,`test`.`t1`.`b`),<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`) in <temporary table> on distinct_key where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`_col_1`) and trigcond(<cache>(`test`.`t1`.`b`) = `tvc_0`.`_col_2`)))))
select * from t2
where (a,c) not in ((1,2),(8,9), (5,1));
a b c
@@ -606,10 +627,10 @@ explain extended select * from t2
where (a,c) not in ((1,2),(8,9), (5,1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 8 func,func 2 100.00 Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 8 func,func 1 100.00 Using where; Full scan on NULL key
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where !<expr_cache><`test`.`t2`.`a`,`test`.`t2`.`c`>(<in_optimizer>((`test`.`t2`.`a`,`test`.`t2`.`c`),<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in (temporary) on key0 where trigcond(<cache>(`test`.`t2`.`a`) = `tvc_0`.`_col_1`) and trigcond(<cache>(`test`.`t2`.`c`) = `tvc_0`.`_col_2`)))))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where !<expr_cache><`test`.`t2`.`a`,`test`.`t2`.`c`>(<in_optimizer>((`test`.`t2`.`a`,`test`.`t2`.`c`),<exists>(<primary_index_lookup>(<cache>(`test`.`t2`.`a`) in <temporary table> on distinct_key where trigcond(<cache>(`test`.`t2`.`a`) = `tvc_0`.`_col_1`) and trigcond(<cache>(`test`.`t2`.`c`) = `tvc_0`.`_col_2`)))))
drop table t1, t2, t3;
set @@in_predicate_conversion_threshold= default;
#
diff --git a/mysql-test/main/opt_tvc.test b/mysql-test/main/opt_tvc.test
index f8469f22aa1..89bede851eb 100644
--- a/mysql-test/main/opt_tvc.test
+++ b/mysql-test/main/opt_tvc.test
@@ -54,6 +54,23 @@ eval $optimized_query;
eval explain extended $query;
eval explain extended $optimized_query;
+
+let $query= select * from t1 where a in (1,2,2,2,3,4,5,6,7);
+
+let $optimized_query=
+select * from t1
+where a in
+ (
+ select *
+ from (values (1),(2),(2),(2),(2),(3),(4),(5),(6),(7)) as tvc_0
+ );
+
+eval $query;
+eval $optimized_query;
+eval explain extended $query;
+eval explain extended $optimized_query;
+
+
--echo # AND-condition with IN-predicates in WHERE-part
let $query=
@@ -276,7 +293,7 @@ eval $query;
eval explain extended $query;
set @@in_predicate_conversion_threshold= 2;
---echo # trasformation works for the one IN predicate and doesn't work for the other
+--echo # transformation works for the one IN predicate and doesn't work for the other
set @@in_predicate_conversion_threshold= 5;
diff --git a/mysql-test/main/optimizer_costs.result b/mysql-test/main/optimizer_costs.result
new file mode 100644
index 00000000000..797c6172b26
--- /dev/null
+++ b/mysql-test/main/optimizer_costs.result
@@ -0,0 +1,347 @@
+select table_name,engine from information_schema.tables where table_name="optimizer_costs";
+table_name engine
+OPTIMIZER_COSTS MEMORY
+show create table information_schema.optimizer_costs;
+Table Create Table
+OPTIMIZER_COSTS CREATE TEMPORARY TABLE `OPTIMIZER_COSTS` (
+ `ENGINE` varchar(192) NOT NULL,
+ `OPTIMIZER_DISK_READ_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_INDEX_BLOCK_COPY_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_KEY_COMPARE_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_KEY_COPY_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_KEY_LOOKUP_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_KEY_NEXT_FIND_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_DISK_READ_RATIO` decimal(9,6) NOT NULL,
+ `OPTIMIZER_ROW_COPY_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_ROW_LOOKUP_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_ROW_NEXT_FIND_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_ROWID_COMPARE_COST` decimal(9,6) NOT NULL,
+ `OPTIMIZER_ROWID_COPY_COST` decimal(9,6) NOT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci
+select * from information_schema.optimizer_costs where engine in
+("memory","innodb","aria","default") order by engine;
+ENGINE Aria
+OPTIMIZER_DISK_READ_COST 10.240000
+OPTIMIZER_INDEX_BLOCK_COPY_COST 0.035600
+OPTIMIZER_KEY_COMPARE_COST 0.011361
+OPTIMIZER_KEY_COPY_COST 0.015685
+OPTIMIZER_KEY_LOOKUP_COST 0.435777
+OPTIMIZER_KEY_NEXT_FIND_COST 0.082347
+OPTIMIZER_DISK_READ_RATIO 0.020000
+OPTIMIZER_ROW_COPY_COST 0.060866
+OPTIMIZER_ROW_LOOKUP_COST 0.130839
+OPTIMIZER_ROW_NEXT_FIND_COST 0.045916
+OPTIMIZER_ROWID_COMPARE_COST 0.002653
+OPTIMIZER_ROWID_COPY_COST 0.002653
+ENGINE default
+OPTIMIZER_DISK_READ_COST 10.240000
+OPTIMIZER_INDEX_BLOCK_COPY_COST 0.035600
+OPTIMIZER_KEY_COMPARE_COST 0.011361
+OPTIMIZER_KEY_COPY_COST 0.015685
+OPTIMIZER_KEY_LOOKUP_COST 0.435777
+OPTIMIZER_KEY_NEXT_FIND_COST 0.082347
+OPTIMIZER_DISK_READ_RATIO 0.020000
+OPTIMIZER_ROW_COPY_COST 0.060866
+OPTIMIZER_ROW_LOOKUP_COST 0.130839
+OPTIMIZER_ROW_NEXT_FIND_COST 0.045916
+OPTIMIZER_ROWID_COMPARE_COST 0.002653
+OPTIMIZER_ROWID_COPY_COST 0.002653
+ENGINE InnoDB
+OPTIMIZER_DISK_READ_COST 10.240000
+OPTIMIZER_INDEX_BLOCK_COPY_COST 0.035600
+OPTIMIZER_KEY_COMPARE_COST 0.011361
+OPTIMIZER_KEY_COPY_COST 0.015685
+OPTIMIZER_KEY_LOOKUP_COST 0.791120
+OPTIMIZER_KEY_NEXT_FIND_COST 0.099000
+OPTIMIZER_DISK_READ_RATIO 0.020000
+OPTIMIZER_ROW_COPY_COST 0.060870
+OPTIMIZER_ROW_LOOKUP_COST 0.765970
+OPTIMIZER_ROW_NEXT_FIND_COST 0.070130
+OPTIMIZER_ROWID_COMPARE_COST 0.002653
+OPTIMIZER_ROWID_COPY_COST 0.002653
+ENGINE MEMORY
+OPTIMIZER_DISK_READ_COST 0.000000
+OPTIMIZER_INDEX_BLOCK_COPY_COST 0.000000
+OPTIMIZER_KEY_COMPARE_COST 0.011361
+OPTIMIZER_KEY_COPY_COST 0.000000
+OPTIMIZER_KEY_LOOKUP_COST 0.000000
+OPTIMIZER_KEY_NEXT_FIND_COST 0.000000
+OPTIMIZER_DISK_READ_RATIO 0.000000
+OPTIMIZER_ROW_COPY_COST 0.002334
+OPTIMIZER_ROW_LOOKUP_COST 0.000000
+OPTIMIZER_ROW_NEXT_FIND_COST 0.000000
+OPTIMIZER_ROWID_COMPARE_COST 0.002653
+OPTIMIZER_ROWID_COPY_COST 0.002653
+show variables like "optimizer%cost";
+Variable_name Value
+optimizer_disk_read_cost 10.240000
+optimizer_index_block_copy_cost 0.035600
+optimizer_key_compare_cost 0.011361
+optimizer_key_copy_cost 0.015685
+optimizer_key_lookup_cost 0.435777
+optimizer_key_next_find_cost 0.082347
+optimizer_row_copy_cost 0.060866
+optimizer_row_lookup_cost 0.130839
+optimizer_row_next_find_cost 0.045916
+optimizer_rowid_compare_cost 0.002653
+optimizer_rowid_copy_cost 0.002653
+optimizer_scan_setup_cost 10.000000
+optimizer_where_cost 0.032000
+show variables like "optimizer_disk_read_ratio";
+Variable_name Value
+optimizer_disk_read_ratio 0.020000
+#
+# Test changing some 'default' variables
+#
+SELECT @@optimizer_disk_read_ratio,@@optimizer_index_block_copy_cost;
+@@optimizer_disk_read_ratio @@optimizer_index_block_copy_cost
+0.020000 0.035600
+SET global optimizer_disk_read_ratio=0.8;
+SET global optimizer_index_block_copy_cost=0.1;
+SELECT @@optimizer_disk_read_ratio,@@optimizer_index_block_copy_cost;
+@@optimizer_disk_read_ratio @@optimizer_index_block_copy_cost
+0.800000 0.100000
+select optimizer_disk_read_ratio,optimizer_index_block_copy_cost from information_schema.optimizer_costs where engine='default';
+optimizer_disk_read_ratio optimizer_index_block_copy_cost
+0.800000 0.100000
+SET global optimizer_disk_read_ratio=default;
+SET global optimizer_index_block_copy_cost=default;
+SELECT @@optimizer_disk_read_ratio,@@optimizer_index_block_copy_cost;
+@@optimizer_disk_read_ratio @@optimizer_index_block_copy_cost
+0.020000 0.035600
+#
+# Test changing some 'engine' variables
+#
+select @@MEMORY.optimizer_row_lookup_cost;
+@@MEMORY.optimizer_row_lookup_cost
+0.000000
+set @tmp=@@MEMORY.optimizer_row_lookup_cost;
+set @@global.MEMORY.optimizer_row_lookup_cost=1;
+select @@MEMORY.optimizer_row_lookup_cost;
+@@MEMORY.optimizer_row_lookup_cost
+1.000000
+set @@global.MEMORY.optimizer_row_lookup_cost=default;
+select @@MEMORY.optimizer_row_lookup_cost;
+@@MEMORY.optimizer_row_lookup_cost
+0.130839
+set @@global.MEMORY.optimizer_row_lookup_cost=@tmp;
+select @@MEMORY.optimizer_row_lookup_cost;
+@@MEMORY.optimizer_row_lookup_cost
+0.000000
+#
+# Print variables with different syntaxes
+#
+SHOW VARIABLES like "optimizer_row_lookup_cost";
+Variable_name Value
+optimizer_row_lookup_cost 0.130839
+SELECT @@optimizer_row_lookup_cost;
+@@optimizer_row_lookup_cost
+0.130839
+SELECT @@global.default.optimizer_row_lookup_cost;
+@@global.default.optimizer_row_lookup_cost
+0.130839
+SELECT @@global.default.`optimizer_row_lookup_cost`;
+@@global.default.`optimizer_row_lookup_cost`
+0.130839
+SELECT @@MEMORY.optimizer_row_lookup_cost;
+@@MEMORY.optimizer_row_lookup_cost
+0.000000
+SELECT @@memory.optimizer_row_lookup_cost;
+@@memory.optimizer_row_lookup_cost
+0.000000
+SELECT @@InnoDB.optimizer_row_lookup_cost;
+@@InnoDB.optimizer_row_lookup_cost
+0.765970
+#
+# Accessing a non-existing cost
+#
+SELECT @@not_existing.optimizer_row_lookup_cost;
+@@not_existing.optimizer_row_lookup_cost
+0.130839
+SELECT @@NOT_existing.optimizer_row_lookup_cost;
+@@NOT_existing.optimizer_row_lookup_cost
+0.130839
+select engine from information_schema.optimizer_costs where engine like '%existing';
+engine
+#
+# Creating a new cost structure
+#
+SET global new_engine.optimizer_disk_read_cost=100;
+select * from information_schema.optimizer_costs where engine like 'new_engine';
+ENGINE OPTIMIZER_DISK_READ_COST OPTIMIZER_INDEX_BLOCK_COPY_COST OPTIMIZER_KEY_COMPARE_COST OPTIMIZER_KEY_COPY_COST OPTIMIZER_KEY_LOOKUP_COST OPTIMIZER_KEY_NEXT_FIND_COST OPTIMIZER_DISK_READ_RATIO OPTIMIZER_ROW_COPY_COST OPTIMIZER_ROW_LOOKUP_COST OPTIMIZER_ROW_NEXT_FIND_COST OPTIMIZER_ROWID_COMPARE_COST OPTIMIZER_ROWID_COPY_COST
+new_engine 100.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000
+select @@new_engine.optimizer_disk_read_cost, @@new_engine.optimizer_row_copy_cost;
+@@new_engine.optimizer_disk_read_cost @@new_engine.optimizer_row_copy_cost
+100.000000 -1.000000
+#
+# Errors
+#
+SELECT @@default.optimizer_disk_read_cost;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'default.optimizer_disk_read_cost' at line 1
+set global Aria.optimizer_disk_read_cost=NULL;
+ERROR 42000: Incorrect argument type to variable 'optimizer_disk_read_cost'
+set @tmp=@@Aria.optimizer_disk_read_cost;
+SET global Aria.optimizer_disk_read_cost=-1;
+Warnings:
+Warning 1292 Truncated incorrect optimizer_disk_read_cost value: '-1'
+select @@Aria.optimizer_disk_read_cost;
+@@Aria.optimizer_disk_read_cost
+0.000000
+SET global Aria.optimizer_disk_read_cost=200000;
+Warnings:
+Warning 1292 Truncated incorrect optimizer_disk_read_cost value: '200000'
+select @@Aria.optimizer_disk_read_cost;
+@@Aria.optimizer_disk_read_cost
+10000.000000
+set global Aria.optimizer_disk_read_cost=@tmp;
+select @@Aria.optimizer_disk_read_cost;
+@@Aria.optimizer_disk_read_cost
+10.240000
+#
+# Test of cost of ref compared to table scan + join_cache
+#
+create or replace table t1 (p int primary key, a char(10)) engine=myisam;
+create or replace table t2 (p int primary key, i int, a char(10), key k2(a)) engine=myisam;
+insert into t2 select seq,seq,'a' from seq_1_to_512;
+insert into t1 select seq,'a' from seq_1_to_4;
+explain select count(*) from t1, t2 where t1.p = t2.i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 4 Using index
+1 SIMPLE t2 ALL NULL NULL NULL NULL 512 Using where; Using join buffer (flat, BNL join)
+insert into t1 select seq,'a' from seq_5_to_10;
+explain select count(*) from t1, t2 where t1.p = t2.i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 512 Using where
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.i 1 Using index
+drop table t1,t2;
+#
+# Test of optimizer_scan_setup_cost
+#
+create table t1 (p int primary key, a char(10)) engine=myisam;
+create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a)) engine=myisam;
+insert into t1 values (2, 'qqqq'), (11, 'yyyy');
+insert into t2 values (1, 2, 'qqqq'), (2, 2, 'pppp'),
+(3, 2, 'yyyy'), (4, 3, 'zzzz');
+set @org_myisam_disk_read_ratio=@@myisam.optimizer_disk_read_ratio;
+set @@optimizer_scan_setup_cost=10,@@global.myisam.optimizer_disk_read_ratio=0.2;
+flush tables;
+explain select sum(t2.p+length(t1.a)) from t1, t2 where t1.p = t2.i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 2
+1 SIMPLE t2 ref k1 k1 5 test.t1.p 1
+set @@optimizer_scan_setup_cost=0.0, @@global.myisam.optimizer_disk_read_ratio=0.0;
+flush tables;
+explain select sum(t2.p+length(t1.a)) from t1, t2 where t1.p = t2.i;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 2
+1 SIMPLE t2 ALL k1 NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+set @@optimizer_scan_setup_cost=default,@@global.myisam.optimizer_disk_read_ratio=@org_myisam_disk_read_ratio;
+flush tables;
+drop table t1,t2;
+#
+# Test of group by optimization
+#
+set @@optimizer_scan_setup_cost=0;
+CREATE TABLE t1 (id INT NOT NULL, a DATE, KEY(id,a)) engine=myisam;
+INSERT INTO t1 values (1,'2001-01-01'),(1,'2001-01-02'),
+(1,'2001-01-03'),(1,'2001-01-04'),
+(2,'2001-01-01'),(2,'2001-01-02'),
+(2,'2001-01-03'),(2,'2001-01-04'),
+(3,'2001-01-01'),(3,'2001-01-02'),
+(3,'2001-01-03'),(3,'2001-01-04'),
+(4,'2001-01-01'),(4,'2001-01-02'),
+(4,'2001-01-03'),(4,'2001-01-04');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL id 8 NULL 16 Using where; Using index
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL id 8 NULL 5 Using where; Using index for group-by
+drop table t1;
+set @@optimizer_scan_setup_cost=default;
+#
+# Test of straight join costs
+#
+create table t1 (l_orderkey int(11) NOT NULL,
+l_partkey int(11) DEFAULT NULL,
+l_suppkey int(11) DEFAULT NULL,
+PRIMARY KEY (l_orderkey)) engine=aria;
+insert into t1 select seq,seq,seq from seq_1_to_1000;
+explain select straight_join count(*) from seq_1_to_10000,t1 where seq=l_orderkey;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_10000 index PRIMARY PRIMARY 8 NULL 10000 Using index
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.seq_1_to_10000.seq 1 Using where; Using index
+show status like "last_query_cost";
+Variable_name Value
+Last_query_cost 5.641229
+set @org_cost=@@aria.optimizer_key_next_find_cost;
+set global aria.optimizer_key_next_find_cost=1000;
+flush tables;
+explain select count(*) from seq_1_to_10000,t1 where seq=l_orderkey;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE seq_1_to_10000 index PRIMARY PRIMARY 8 NULL 10000 Using index
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.seq_1_to_10000.seq 1 Using where; Using index
+show status like "last_query_cost";
+Variable_name Value
+Last_query_cost 5.641229
+set global aria.optimizer_key_next_find_cost=@org_cost;
+drop table t1;
+#
+# Testing distinct group optimization
+#
+create table t1 (a int, b int, key(a,b));
+insert into t1 select seq,seq from seq_1_to_1000;
+explain select count(distinct a,b) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL a 10 NULL 1000 Using index for group-by (scanning)
+explain select count(distinct a,b) from t1 where a>100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 901 Using where; Using index for group-by (scanning)
+explain select count(distinct a,b) from t1 where a>800;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 206 Using where; Using index
+update t1 set a=mod(a,10);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain select count(distinct a,b) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL a 10 NULL 1000 Using index for group-by (scanning)
+explain select count(distinct a,b) from t1 where a>1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 788 Using where; Using index for group-by (scanning)
+explain select count(distinct a,b) from t1 where a>8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 109 Using where; Using index
+update t1 set b=mod(b,2);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain select count(distinct a,b) from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL a 10 NULL 11 Using index for group-by
+explain select count(distinct a,b) from t1 where a>1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 9 Using where; Using index for group-by
+explain select count(distinct a,b) from t1 where a>8;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 10 NULL 1 Using where; Using index for group-by
+drop table t1;
+#
+# cleanup
+#
+"New cost structures: 1 (should be 1)"
diff --git a/mysql-test/main/optimizer_costs.test b/mysql-test/main/optimizer_costs.test
new file mode 100644
index 00000000000..bd7e89a58cc
--- /dev/null
+++ b/mysql-test/main/optimizer_costs.test
@@ -0,0 +1,189 @@
+#
+# Test of optimizer_costs
+#
+--source include/have_innodb.inc
+--source include/have_sequence.inc
+
+select table_name,engine from information_schema.tables where table_name="optimizer_costs";
+show create table information_schema.optimizer_costs;
+let $start_engines=`select count(*) from information_schema.optimizer_costs`;
+--vertical_results
+select * from information_schema.optimizer_costs where engine in
+("memory","innodb","aria","default") order by engine;
+--horizontal_results
+show variables like "optimizer%cost";
+show variables like "optimizer_disk_read_ratio";
+
+--echo #
+--echo # Test changing some 'default' variables
+--echo #
+SELECT @@optimizer_disk_read_ratio,@@optimizer_index_block_copy_cost;
+SET global optimizer_disk_read_ratio=0.8;
+SET global optimizer_index_block_copy_cost=0.1;
+SELECT @@optimizer_disk_read_ratio,@@optimizer_index_block_copy_cost;
+select optimizer_disk_read_ratio,optimizer_index_block_copy_cost from information_schema.optimizer_costs where engine='default';
+SET global optimizer_disk_read_ratio=default;
+SET global optimizer_index_block_copy_cost=default;
+SELECT @@optimizer_disk_read_ratio,@@optimizer_index_block_copy_cost;
+
+--echo #
+--echo # Test changing some 'engine' variables
+--echo #
+select @@MEMORY.optimizer_row_lookup_cost;
+set @tmp=@@MEMORY.optimizer_row_lookup_cost;
+set @@global.MEMORY.optimizer_row_lookup_cost=1;
+select @@MEMORY.optimizer_row_lookup_cost;
+set @@global.MEMORY.optimizer_row_lookup_cost=default;
+select @@MEMORY.optimizer_row_lookup_cost;
+set @@global.MEMORY.optimizer_row_lookup_cost=@tmp;
+select @@MEMORY.optimizer_row_lookup_cost;
+
+--echo #
+--echo # Print variables with different syntaxes
+--echo #
+SHOW VARIABLES like "optimizer_row_lookup_cost";
+SELECT @@optimizer_row_lookup_cost;
+SELECT @@global.default.optimizer_row_lookup_cost;
+SELECT @@global.default.`optimizer_row_lookup_cost`;
+SELECT @@MEMORY.optimizer_row_lookup_cost;
+SELECT @@memory.optimizer_row_lookup_cost;
+SELECT @@InnoDB.optimizer_row_lookup_cost;
+
+--echo #
+--echo # Accessing a non-existing cost
+--echo #
+SELECT @@not_existing.optimizer_row_lookup_cost;
+SELECT @@NOT_existing.optimizer_row_lookup_cost;
+select engine from information_schema.optimizer_costs where engine like '%existing';
+
+--echo #
+--echo # Creating a new cost structure
+--echo #
+SET global new_engine.optimizer_disk_read_cost=100;
+select * from information_schema.optimizer_costs where engine like 'new_engine';
+select @@new_engine.optimizer_disk_read_cost, @@new_engine.optimizer_row_copy_cost;
+
+--echo #
+--echo # Errors
+--echo #
+--error ER_PARSE_ERROR
+SELECT @@default.optimizer_disk_read_cost;
+--error ER_WRONG_TYPE_FOR_VAR
+set global Aria.optimizer_disk_read_cost=NULL;
+
+set @tmp=@@Aria.optimizer_disk_read_cost;
+SET global Aria.optimizer_disk_read_cost=-1;
+select @@Aria.optimizer_disk_read_cost;
+SET global Aria.optimizer_disk_read_cost=200000;
+select @@Aria.optimizer_disk_read_cost;
+set global Aria.optimizer_disk_read_cost=@tmp;
+select @@Aria.optimizer_disk_read_cost;
+
+--echo #
+--echo # Test of cost of ref compared to table scan + join_cache
+--echo #
+
+create or replace table t1 (p int primary key, a char(10)) engine=myisam;
+create or replace table t2 (p int primary key, i int, a char(10), key k2(a)) engine=myisam;
+insert into t2 select seq,seq,'a' from seq_1_to_512;
+
+insert into t1 select seq,'a' from seq_1_to_4;
+explain select count(*) from t1, t2 where t1.p = t2.i;
+insert into t1 select seq,'a' from seq_5_to_10;
+explain select count(*) from t1, t2 where t1.p = t2.i;
+
+drop table t1,t2;
+
+--echo #
+--echo # Test of optimizer_scan_setup_cost
+--echo #
+
+create table t1 (p int primary key, a char(10)) engine=myisam;
+create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a)) engine=myisam;
+insert into t1 values (2, 'qqqq'), (11, 'yyyy');
+insert into t2 values (1, 2, 'qqqq'), (2, 2, 'pppp'),
+ (3, 2, 'yyyy'), (4, 3, 'zzzz');
+set @org_myisam_disk_read_ratio=@@myisam.optimizer_disk_read_ratio;
+set @@optimizer_scan_setup_cost=10,@@global.myisam.optimizer_disk_read_ratio=0.2;
+flush tables;
+explain select sum(t2.p+length(t1.a)) from t1, t2 where t1.p = t2.i;
+set @@optimizer_scan_setup_cost=0.0, @@global.myisam.optimizer_disk_read_ratio=0.0;
+flush tables;
+explain select sum(t2.p+length(t1.a)) from t1, t2 where t1.p = t2.i;
+set @@optimizer_scan_setup_cost=default,@@global.myisam.optimizer_disk_read_ratio=@org_myisam_disk_read_ratio;
+flush tables;
+drop table t1,t2;
+
+--echo #
+--echo # Test of group by optimization
+--echo #
+
+set @@optimizer_scan_setup_cost=0;
+CREATE TABLE t1 (id INT NOT NULL, a DATE, KEY(id,a)) engine=myisam;
+INSERT INTO t1 values (1,'2001-01-01'),(1,'2001-01-02'),
+(1,'2001-01-03'),(1,'2001-01-04'),
+(2,'2001-01-01'),(2,'2001-01-02'),
+(2,'2001-01-03'),(2,'2001-01-04'),
+(3,'2001-01-01'),(3,'2001-01-02'),
+(3,'2001-01-03'),(3,'2001-01-04'),
+(4,'2001-01-01'),(4,'2001-01-02'),
+(4,'2001-01-03'),(4,'2001-01-04');
+analyze table t1;
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+insert into t1 values (3,'2001-01-03'),(3,'2001-01-04');
+analyze table t1;
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+drop table t1;
+set @@optimizer_scan_setup_cost=default;
+
+--echo #
+--echo # Test of straight join costs
+--echo #
+create table t1 (l_orderkey int(11) NOT NULL,
+ l_partkey int(11) DEFAULT NULL,
+ l_suppkey int(11) DEFAULT NULL,
+ PRIMARY KEY (l_orderkey)) engine=aria;
+insert into t1 select seq,seq,seq from seq_1_to_1000;
+explain select straight_join count(*) from seq_1_to_10000,t1 where seq=l_orderkey;
+show status like "last_query_cost";
+set @org_cost=@@aria.optimizer_key_next_find_cost;
+# Set cost for t1 high so that we cannot use it for index scans
+set global aria.optimizer_key_next_find_cost=1000;
+flush tables;
+explain select count(*) from seq_1_to_10000,t1 where seq=l_orderkey;
+show status like "last_query_cost";
+set global aria.optimizer_key_next_find_cost=@org_cost;
+drop table t1;
+
+--echo #
+--echo # Testing distinct group optimization
+--echo #
+
+create table t1 (a int, b int, key(a,b));
+insert into t1 select seq,seq from seq_1_to_1000;
+explain select count(distinct a,b) from t1;
+explain select count(distinct a,b) from t1 where a>100;
+explain select count(distinct a,b) from t1 where a>800;
+update t1 set a=mod(a,10);
+analyze table t1;
+explain select count(distinct a,b) from t1;
+explain select count(distinct a,b) from t1 where a>1;
+explain select count(distinct a,b) from t1 where a>8;
+update t1 set b=mod(b,2);
+analyze table t1;
+explain select count(distinct a,b) from t1;
+explain select count(distinct a,b) from t1 where a>1;
+explain select count(distinct a,b) from t1 where a>8;
+drop table t1;
+
+--echo #
+--echo # cleanup
+--echo #
+
+let $end_engines=`select count(*) from information_schema.optimizer_costs`;
+let $diff=`select $end_engines - $start_engines`;
+--echo "New cost structures: $diff (should be 1)"
+
diff --git a/mysql-test/main/optimizer_costs2.opt b/mysql-test/main/optimizer_costs2.opt
new file mode 100644
index 00000000000..718ccafc05e
--- /dev/null
+++ b/mysql-test/main/optimizer_costs2.opt
@@ -0,0 +1 @@
+--optimizer_disk_read_ratio=0.9 --MEMORY.optimizer_disk_read_ratio=0.1 --memory.optimizer_disk_read_ratio=0.3 --memory.optimizer_row_lookup_cost=0.8
diff --git a/mysql-test/main/optimizer_costs2.result b/mysql-test/main/optimizer_costs2.result
new file mode 100644
index 00000000000..688dcb51110
--- /dev/null
+++ b/mysql-test/main/optimizer_costs2.result
@@ -0,0 +1,8 @@
+select engine,optimizer_disk_read_ratio from information_schema.optimizer_costs where engine in ("memory","aria","default");
+engine optimizer_disk_read_ratio
+default 0.900000
+MEMORY 0.300000
+Aria 0.900000
+select @@memory.optimizer_row_lookup_cost;
+@@memory.optimizer_row_lookup_cost
+0.800000
diff --git a/mysql-test/main/optimizer_costs2.test b/mysql-test/main/optimizer_costs2.test
new file mode 100644
index 00000000000..0445ce523cd
--- /dev/null
+++ b/mysql-test/main/optimizer_costs2.test
@@ -0,0 +1,6 @@
+#
+# Check default optimizer_cost_arguments
+#
+
+select engine,optimizer_disk_read_ratio from information_schema.optimizer_costs where engine in ("memory","aria","default");
+select @@memory.optimizer_row_lookup_cost;
diff --git a/mysql-test/main/order_by.result b/mysql-test/main/order_by.result
index 1311f42dac2..96f5d9a49c3 100644
--- a/mysql-test/main/order_by.result
+++ b/mysql-test/main/order_by.result
@@ -1192,7 +1192,10 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index k2 k3 5 NULL 111 Using where
EXPLAIN SELECT id,c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 4000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index k2 k3 5 NULL 22318 Using where
+1 SIMPLE t2 range k2 k2 5 NULL 7341 Using index condition; Using filesort
+EXPLAIN SELECT id,c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 6000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL k2 NULL NULL NULL 40960 Using where; Using filesort
EXPLAIN SELECT id,c3 FROM t2 WHERE c2 BETWEEN 10 AND 12 ORDER BY c3 LIMIT 20;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index k2 k3 5 NULL 73 Using where
@@ -1221,6 +1224,10 @@ id c3
176 14
186 14
196 14
+ALTER TABLE t2 DROP INDEX k3, ADD INDEX k3 (c3,c2);
+EXPLAIN SELECT c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 4000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 index k2 k3 10 NULL 22318 Using where; Using index
DROP TABLE t1,t2;
CREATE TABLE t1 (
a INT,
@@ -1548,6 +1555,56 @@ UNIQUE KEY a_c (a,c),
KEY (a));
INSERT INTO t1 VALUES (1, 10), (2, NULL);
# Must use ref-or-null on the a_c index
+ANALYZE FORMAT=JSON
+SELECT 1 AS col FROM t1 WHERE a=2 AND (c=10 OR c IS NULL) ORDER BY c;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "read_sorted_file": {
+ "r_rows": 1,
+ "filesort": {
+ "sort_key": "t1.c",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "r_used_priority_queue": false,
+ "r_output_rows": 1,
+ "r_buffer_size": "REPLACED",
+ "r_sort_mode": "sort_key,addon_fields",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref_or_null",
+ "possible_keys": ["a_c", "a"],
+ "key": "a_c",
+ "key_length": "10",
+ "used_key_parts": ["a", "c"],
+ "ref": ["const", "const"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 2,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 50,
+ "r_filtered": 100,
+ "attached_condition": "t1.c = 10 or t1.c is null",
+ "using_index": true
+ }
+ }
+ }
+ }
+ ]
+ }
+}
EXPLAIN
SELECT 1 AS col FROM t1 WHERE a=2 AND (c=10 OR c IS NULL) ORDER BY c;
id select_type table type possible_keys key key_len ref rows Extra
@@ -2976,17 +3033,17 @@ EXPLAIN
SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.a=t2.a ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 8 Using index
-1 SIMPLE t2 ref i_a i_a 5 test.t1.a 2 Using index
+1 SIMPLE t2 ref i_a i_a 5 test.t1.a 1 Using index
EXPLAIN
SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.a=t2.a ORDER BY t1.a LIMIT 8;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 4 NULL 4 Using index
-1 SIMPLE t2 ref i_a i_a 5 test.t1.a 2 Using index
+1 SIMPLE t1 index NULL PRIMARY 4 NULL 6 Using index
+1 SIMPLE t2 ref i_a i_a 5 test.t1.a 1 Using index
EXPLAIN
SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.a=t2.a ORDER BY t1.a LIMIT 100;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 8 Using index
-1 SIMPLE t2 ref i_a i_a 5 test.t1.a 2 Using index
+1 SIMPLE t2 ref i_a i_a 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-4974 memory leak in 5.5.32-MariaDB-1~wheezy-log
@@ -3065,7 +3122,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00
1 PRIMARY t3a ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary
-1 PRIMARY t3b ref f3_key f3_key 6 test.t3a.f3 1 100.00 Using where; End temporary
+1 PRIMARY t3b ref f3_key f3_key 6 test.t3a.f3 1 41.67 Using where; End temporary
Warnings:
Note 1003 select concat('foo',`test`.`t2`.`f2`) AS `field` from `test`.`t2` semi join ((`test`.`t3` `t3a` join `test`.`t3` `t3b`)) where `test`.`t3a`.`f3` < 'foo' or `test`.`t3b`.`f3` <> 'foo' order by concat('foo',`test`.`t2`.`f2`)
DROP TABLE t1,t2,t3;
@@ -3119,7 +3176,7 @@ id select_type table type possible_keys key key_len ref rows Extra
# See above query
EXPLAIN SELECT id1 FROM t2 WHERE id2=1 AND id3=1 ORDER BY date DESC LIMIT 0,4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range id_23_date,id_234_date id_23_date 2 NULL 8 Using where
+1 SIMPLE t2 ref id_23_date,id_234_date id_23_date 2 const,const 8 Using where
drop table t1,t2;
#
# MDEV-8989: ORDER BY optimizer ignores equality propagation
@@ -3188,13 +3245,13 @@ explain
select t2.pk,t2.a,t2.b,t3.pk,t3.a,t3.b
from t2, t3 where t2.a=t3.a order by t2.a limit 25;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL a NULL NULL NULL 200 Using where; Using filesort
+1 SIMPLE t2 index a a 5 NULL 25 Using where
1 SIMPLE t3 ref a a 5 test.t2.a 1
explain
select t2.pk,t2.a,t2.b,t3.pk,t3.a,t3.b
from t2, t3 where t2.a=t3.a order by t3.a limit 25;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL a NULL NULL NULL 200 Using where; Using filesort
+1 SIMPLE t2 index a a 5 NULL 25 Using where
1 SIMPLE t3 ref a a 5 test.t2.a 1
select t2.pk,t2.a,t2.b,t3.pk,t3.a,t3.b
from t2, t3 where t2.a=t3.a order by t2.a limit 25;
@@ -3401,6 +3458,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -3412,16 +3470,17 @@ ANALYZE
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"r_limit": 5,
- "r_used_priority_queue": false,
- "r_output_rows": 100,
- "r_buffer_size": "REPLACED",
- "r_sort_mode": "sort_key,packed_addon_fields",
+ "r_used_priority_queue": true,
+ "r_output_rows": 6,
+ "r_sort_mode": "sort_key,rowid",
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 100,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3453,7 +3512,7 @@ CREATE TABLE t2 SELECT * FROM t1;
EXPLAIN SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.b ORDER BY t1.b LIMIT 1) AS c FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2
-2 DEPENDENT SUBQUERY t1 index PRIMARY b 5 NULL 1 Using where
+2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 test.t2.b 1 Using where
SELECT (SELECT 1 FROM t1 WHERE t1.a=t2.b ORDER BY t1.b LIMIT 1) AS c FROM t2;
c
1
@@ -3501,11 +3560,10 @@ WHERE books.library_id = 8663 AND
books.scheduled_for_removal=0 )
ORDER BY wings.id;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using filesort
-1 PRIMARY wings eq_ref PRIMARY PRIMARY 4 test.books.wings_id 1 100.00
-2 MATERIALIZED books ref library_idx library_idx 4 const 2 100.00 Using where
+1 PRIMARY wings ALL PRIMARY NULL NULL NULL 2 100.00 Using filesort
+1 PRIMARY books ref library_idx library_idx 4 const 2 50.00 Using where; FirstMatch(wings)
Warnings:
-Note 1003 select `test`.`wings`.`id` AS `wing_id`,`test`.`wings`.`department_id` AS `department_id` from `test`.`wings` semi join (`test`.`books`) where `test`.`books`.`library_id` = 8663 and `test`.`books`.`scheduled_for_removal` = 0 and `test`.`wings`.`id` = `test`.`books`.`wings_id` order by `test`.`wings`.`id`
+Note 1003 select `test`.`wings`.`id` AS `wing_id`,`test`.`wings`.`department_id` AS `department_id` from `test`.`wings` semi join (`test`.`books`) where `test`.`books`.`library_id` = 8663 and `test`.`books`.`scheduled_for_removal` = 0 and `test`.`books`.`wings_id` = `test`.`wings`.`id` order by `test`.`wings`.`id`
set optimizer_switch= @save_optimizer_switch;
DROP TABLE books, wings;
#
@@ -3637,8 +3695,8 @@ WHERE
t2.key1 = t1.a and t2.key1 IS NOT NULL
ORDER BY
t2.key2 ASC
-LIMIT 1)
-from t1;
+LIMIT 1) as "con"
+ from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 10
2 DEPENDENT SUBQUERY t2 ref key1 key1 5 test.t1.a 10 Using index condition; Using where; Using filesort
@@ -3649,25 +3707,19 @@ WHERE
t2.key1 = t1.a and t2.key1 IS NOT NULL
ORDER BY
t2.key2 ASC
-LIMIT 1)
-from t1;
-(SELECT concat(id, '-', key1, '-', col1)
-FROM t2
-WHERE
-t2.key1 = t1.a and t2.key1 IS NOT NULL
-ORDER BY
-t2.key2 ASC
-LIMIT 1)
-900-0-123456
-901-1-123456
-902-2-123456
-903-3-123456
-904-4-123456
-905-5-123456
-906-6-123456
-907-7-123456
-908-8-123456
-909-9-123456
+LIMIT 1) as "con"
+ from t1;
+con
+100-0-123456
+101-1-123456
+102-2-123456
+103-3-123456
+104-4-123456
+105-5-123456
+106-6-123456
+107-7-123456
+108-8-123456
+109-9-123456
drop table t1,t2;
# End of 10.3 tests
#
@@ -3758,6 +3810,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -3775,9 +3828,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 100,
"r_rows": 100,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3924,6 +3979,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -3941,9 +3997,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 5,
"r_rows": 5,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3973,6 +4031,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -3990,9 +4049,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 6,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4028,6 +4089,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4045,9 +4107,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 6,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4097,6 +4161,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4114,9 +4179,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4279,6 +4346,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -4286,9 +4354,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 50,
"r_rows": 50,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4303,6 +4373,7 @@ ANALYZE
"r_hit_ratio": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 50,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -4319,9 +4390,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 50,
"rows": 50,
"r_rows": 50,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -4400,7 +4473,8 @@ CREATE TABLE t1 (a INT, b int, primary key(a));
CREATE TABLE t2 (a INT, b INT);
INSERT INTO t1 (a,b) VALUES (58,1),(96,2),(273,3),(23,4),(231,5),(525,6),
(2354,7),(321421,3),(535,2),(4535,3);
-INSERT INTO t2 (a,b) VALUES (58,3),(96,3),(273,3);
+INSERT INTO t2 (a,b) VALUES (58,3),(96,3),(273,3),(1000,1000),(2000,2000),(3000,3000);
+INSERT INTO t2 select seq,seq from seq_10_to_100;
# Join order should have the SJM scan table as the first table for both
# the queries with GROUP BY and ORDER BY clause.
EXPLAIN SELECT t1.a
@@ -4408,9 +4482,9 @@ FROM t1
WHERE t1.a IN (SELECT a FROM t2 WHERE b=3)
ORDER BY t1.a DESC;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3 Using filesort
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 Using index
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 10 Using index
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 97 Using where
EXPLAIN FORMAT=JSON SELECT t1.a
FROM t1
WHERE t1.a IN (SELECT a FROM t2 WHERE b=3)
@@ -4419,50 +4493,53 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
- "read_sorted_file": {
- "filesort": {
- "sort_key": "t1.a desc",
- "table": {
- "table_name": "<subquery2>",
- "access_type": "ALL",
- "possible_keys": ["distinct_key"],
- "rows": 3,
- "filtered": 100,
- "materialized": {
- "unique": 1,
- "query_block": {
- "select_id": 2,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "rows": 3,
- "filtered": 100,
- "attached_condition": "t2.b = 3 and t2.a is not null"
- }
- }
- ]
- }
- }
- }
- }
+ "table": {
+ "table_name": "t1",
+ "access_type": "index",
+ "possible_keys": ["PRIMARY"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["a"],
+ "loops": 1,
+ "rows": 10,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "using_index": true
}
},
{
"table": {
- "table_name": "t1",
+ "table_name": "<subquery2>",
"access_type": "eq_ref",
- "possible_keys": ["PRIMARY"],
- "key": "PRIMARY",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
"key_length": "4",
"used_key_parts": ["a"],
- "ref": ["test.t2.a"],
+ "ref": ["func"],
"rows": 1,
"filtered": 100,
- "using_index": true
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 97,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "attached_condition": "t2.b = 3"
+ }
+ }
+ ]
+ }
+ }
}
}
]
@@ -4481,9 +4558,9 @@ FROM t1
WHERE t1.a IN (SELECT a FROM t2 WHERE b=3)
GROUP BY t1.a DESC;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3 Using filesort
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 10 Using filesort
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 97 Using where
EXPLAIN FORMAT=JSON SELECT t1.a, group_concat(t1.b)
FROM t1
WHERE t1.a IN (SELECT a FROM t2 WHERE b=3)
@@ -4492,49 +4569,54 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
"filesort": {
"sort_key": "t1.a desc",
"table": {
- "table_name": "<subquery2>",
+ "table_name": "t1",
"access_type": "ALL",
- "possible_keys": ["distinct_key"],
- "rows": 3,
- "filtered": 100,
- "materialized": {
- "unique": 1,
- "query_block": {
- "select_id": 2,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "rows": 3,
- "filtered": 100,
- "attached_condition": "t2.b = 3 and t2.a is not null"
- }
- }
- ]
- }
- }
+ "possible_keys": ["PRIMARY"],
+ "loops": 1,
+ "rows": 10,
+ "cost": "COST_REPLACED",
+ "filtered": 100
}
}
}
},
{
"table": {
- "table_name": "t1",
+ "table_name": "<subquery2>",
"access_type": "eq_ref",
- "possible_keys": ["PRIMARY"],
- "key": "PRIMARY",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
"key_length": "4",
"used_key_parts": ["a"],
- "ref": ["test.t2.a"],
+ "ref": ["func"],
"rows": 1,
- "filtered": 100
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 97,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "attached_condition": "t2.b = 3"
+ }
+ }
+ ]
+ }
+ }
}
}
]
diff --git a/mysql-test/main/order_by.test b/mysql-test/main/order_by.test
index 1cd9efa2710..a292e468ef2 100644
--- a/mysql-test/main/order_by.test
+++ b/mysql-test/main/order_by.test
@@ -9,6 +9,7 @@
--source include/no_view_protocol.inc
call mtr.add_suppression("Sort aborted.*");
+--source include/have_sequence.inc
call mtr.add_suppression("Out of sort memory; increase server sort buffer size");
--source include/have_sequence.inc
@@ -798,11 +799,16 @@ INSERT INTO t2 SELECT * FROM t1 ORDER BY id;
EXPLAIN SELECT id,c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 20;
EXPLAIN SELECT id,c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 4000;
+EXPLAIN SELECT id,c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 6000;
EXPLAIN SELECT id,c3 FROM t2 WHERE c2 BETWEEN 10 AND 12 ORDER BY c3 LIMIT 20;
EXPLAIN SELECT id,c3 FROM t2 WHERE c2 BETWEEN 20 AND 30 ORDER BY c3 LIMIT 4000;
SELECT id,c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 20;
+# Show that the ORDER BY optimization takes index-only scans into account
+ALTER TABLE t2 DROP INDEX k3, ADD INDEX k3 (c3,c2);
+EXPLAIN SELECT c3 FROM t2 WHERE c2=11 ORDER BY c3 LIMIT 4000;
+
DROP TABLE t1,t2;
#
@@ -907,8 +913,12 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES (1, 10), (2, NULL);
--echo # Must use ref-or-null on the a_c index
+--source include/analyze-format.inc
+ANALYZE FORMAT=JSON
+SELECT 1 AS col FROM t1 WHERE a=2 AND (c=10 OR c IS NULL) ORDER BY c;
EXPLAIN
SELECT 1 AS col FROM t1 WHERE a=2 AND (c=10 OR c IS NULL) ORDER BY c;
+
--echo # Must return 1 row
SELECT 1 AS col FROM t1 WHERE a=2 AND (c=10 OR c IS NULL) ORDER BY c;
@@ -2388,7 +2398,7 @@ let $query= select
t2.key1 = t1.a and t2.key1 IS NOT NULL
ORDER BY
t2.key2 ASC
- LIMIT 1)
+ LIMIT 1) as "con"
from t1;
--echo # here type should show ref not index
@@ -2671,8 +2681,8 @@ CREATE TABLE t2 (a INT, b INT);
INSERT INTO t1 (a,b) VALUES (58,1),(96,2),(273,3),(23,4),(231,5),(525,6),
(2354,7),(321421,3),(535,2),(4535,3);
-INSERT INTO t2 (a,b) VALUES (58,3),(96,3),(273,3);
-
+INSERT INTO t2 (a,b) VALUES (58,3),(96,3),(273,3),(1000,1000),(2000,2000),(3000,3000);
+INSERT INTO t2 select seq,seq from seq_10_to_100;
--echo # Join order should have the SJM scan table as the first table for both
--echo # the queries with GROUP BY and ORDER BY clause.
@@ -2682,6 +2692,7 @@ let $query= SELECT t1.a
ORDER BY t1.a DESC;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
eval $query;
@@ -2691,6 +2702,7 @@ let $query= SELECT t1.a, group_concat(t1.b)
GROUP BY t1.a DESC;
eval EXPLAIN $query;
+--source include/explain-no-costs.inc
eval EXPLAIN FORMAT=JSON $query;
eval $query;
DROP TABLE t1, t2;
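The hunks above also wrap every EXPLAIN FORMAT=JSON and ANALYZE FORMAT=JSON statement in the shared replacement includes (include/explain-no-costs.inc and include/analyze-format.inc), which is why the recorded plans now show "cost": "COST_REPLACED" and "REPLACED" instead of raw numbers. A minimal sketch of the pattern, reusing the query from this test (the statements below are illustrative; the include paths are the ones used in the diff):

--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON SELECT t1.a FROM t1 WHERE t1.a IN (SELECT a FROM t2 WHERE b=3) ORDER BY t1.a DESC;

--source include/analyze-format.inc
ANALYZE FORMAT=JSON SELECT t1.a FROM t1 WHERE t1.a IN (SELECT a FROM t2 WHERE b=3) ORDER BY t1.a DESC;

The masking evidently applies only to the recorded .result output; the statements still execute, so plan-shape regressions are still caught while cost-model tuning does not churn the expected files.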
diff --git a/mysql-test/main/order_by_innodb.result b/mysql-test/main/order_by_innodb.result
index 741084c8a6c..ad4acad3319 100644
--- a/mysql-test/main/order_by_innodb.result
+++ b/mysql-test/main/order_by_innodb.result
@@ -53,17 +53,29 @@ KEY a_c (a,c),
KEY a_b (a,b)
) ENGINE=InnoDB;
insert into t1 select A.a , B.a, C.a from t0 A, t0 B, t0 C;
+select count(*) from t1;
+count(*)
+1000
+select count(*) from t1 where a=1;
+count(*)
+200
+select count(*) from t1 where a=1 and c=2;
+count(*)
+20
# should use ref access
explain select a,b,c from t1 where a=1 and c=2 order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a_c,a_b a_c 10 const,const 20 Using where; Using filesort
-# both should use range access
-explain select a,b,c from t1 where a=1 and c=2 order by b limit 1000;
+# all should use ref access
+explain select a,b,c from t1 where a=1 and c=2 order by b limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a_c,a_b a_c 10 const,const 20 Using where; Using filesort
+explain select a,b,c from t1 where a=1 and c=2 order by b limit 300;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a_c,a_b a_b 5 NULL 200 Using where
-explain select a,b,c from t1 where a=1 and c=2 order by b limit 2000;
+1 SIMPLE t1 ref a_c,a_b a_c 10 const,const 20 Using where; Using filesort
+explain select a,b,c from t1 where a=1 and c=2 order by b limit 1000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a_c,a_b a_b 5 NULL 200 Using where
+1 SIMPLE t1 ref a_c,a_b a_c 10 const,const 20 Using where; Using filesort
drop table t1,t0;
# Start of 10.2 tests
#
@@ -244,8 +256,8 @@ d1 > '2019-02-06 00:00:00'
dd.d1, dd.d2, dd.id limit 1
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index
-1 PRIMARY t2 eq_ref PRIMARY,id2 PRIMARY 4 func # Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL #
+1 PRIMARY t2 eq_ref PRIMARY,id2 id2 8 test.t1.id,func # Using where; Using index
2 DEPENDENT SUBQUERY dd range id2,for_latest_sort for_latest_sort 6 NULL # Using where
drop table t1,t2,t3;
# End of 10.2 tests
diff --git a/mysql-test/main/order_by_innodb.test b/mysql-test/main/order_by_innodb.test
index bdaef56672f..acce96c7603 100644
--- a/mysql-test/main/order_by_innodb.test
+++ b/mysql-test/main/order_by_innodb.test
@@ -66,13 +66,17 @@ KEY a_c (a,c),
KEY a_b (a,b)
) ENGINE=InnoDB;
insert into t1 select A.a , B.a, C.a from t0 A, t0 B, t0 C;
+select count(*) from t1;
+select count(*) from t1 where a=1;
+select count(*) from t1 where a=1 and c=2;
--echo # should use ref access
explain select a,b,c from t1 where a=1 and c=2 order by b;
---echo # both should use range access
+--echo # all should use ref access
+explain select a,b,c from t1 where a=1 and c=2 order by b limit 10;
+explain select a,b,c from t1 where a=1 and c=2 order by b limit 300;
explain select a,b,c from t1 where a=1 and c=2 order by b limit 1000;
-explain select a,b,c from t1 where a=1 and c=2 order by b limit 2000;
drop table t1,t0;
--echo # Start of 10.2 tests
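The counts added at the top of this test (1000 rows, 200 with a=1, 20 with a=1 AND c=2) explain the new expectations above: with the updated cost model, reading ~20 rows via ref access on a_c and filesorting them is costed below walking the a_b index in b-order past ~200 rows with a=1 until the LIMIT is satisfied, so all three LIMIT values now keep the ref plan. To compare the two strategies by hand, a sketch along these lines should work; FORCE INDEX is used here only to pin each plan and is not part of the test:

# ref on a_c: ~20 matching rows, then a filesort of just those rows
explain select a,b,c from t1 force index (a_c) where a=1 and c=2 order by b limit 1000;
# a_b should typically return rows already in ORDER BY b order, but must scan ~200 rows with a=1
explain select a,b,c from t1 force index (a_b) where a=1 and c=2 order by b limit 1000;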
diff --git a/mysql-test/main/order_by_pack_big.result b/mysql-test/main/order_by_pack_big.result
index 6b33d7d8202..09b4b729de1 100644
--- a/mysql-test/main/order_by_pack_big.result
+++ b/mysql-test/main/order_by_pack_big.result
@@ -94,6 +94,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -111,9 +112,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10000,
"r_rows": 10000,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -258,6 +261,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -276,9 +280,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10000,
"r_rows": 10000,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -420,6 +426,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -437,9 +444,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10000,
"r_rows": 10000,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -476,6 +485,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -493,9 +503,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10000,
"r_rows": 10000,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
diff --git a/mysql-test/main/order_by_sortkey.result b/mysql-test/main/order_by_sortkey.result
index c1d9609eb47..6970568882f 100644
--- a/mysql-test/main/order_by_sortkey.result
+++ b/mysql-test/main/order_by_sortkey.result
@@ -40,6 +40,9 @@ INSERT INTO tmp SELECT f1,f2 FROM t1;
INSERT INTO t1(f1,f2) SELECT * FROM tmp;
INSERT INTO tmp SELECT f1,f2 FROM t1;
INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+select count(*) from t1;
+count(*)
+87700
set sort_buffer_size= 32768;
FLUSH STATUS;
SHOW SESSION STATUS LIKE 'Sort%';
@@ -49,6 +52,9 @@ Sort_priority_queue_sorts 0
Sort_range 0
Sort_rows 0
Sort_scan 0
+explain SELECT * FROM t1 ORDER BY f2 LIMIT 100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 87700 Using filesort
SELECT * FROM t1 ORDER BY f2 LIMIT 100;
f0 f1 f2
1 0 0
@@ -151,11 +157,24 @@ f0 f1 f2
9701 0 0
9801 0 0
9901 0 0
-SHOW SESSION STATUS LIKE 'Sort%';
-Variable_name Value
-Sort_merge_passes 0
-Sort_priority_queue_sorts 1
-Sort_range 0
-Sort_rows 100
-Sort_scan 1
-DROP TABLE t1, tmp;
+create table t2
+select * from information_schema.SESSION_STATUS
+where
+variable_name like 'handler_read_rnd%' or
+variable_name like 'Sort%';
+select * from t2 where variable_name like 'Sort%';
+VARIABLE_NAME VARIABLE_VALUE
+SORT_MERGE_PASSES 0
+SORT_PRIORITY_QUEUE_SORTS 1
+SORT_RANGE 0
+SORT_ROWS 100
+SORT_SCAN 1
+select * from t2 where variable_name='HANDLER_READ_RND';
+VARIABLE_NAME VARIABLE_VALUE
+HANDLER_READ_RND 100
+select
+if(variable_value in (87701, 87802), 'OK', 'FAIL') as RES
+from t2 where variable_name='HANDLER_READ_RND_NEXT';
+RES
+OK
+DROP TABLE t1, tmp, t2;
diff --git a/mysql-test/main/order_by_sortkey.test b/mysql-test/main/order_by_sortkey.test
index 43de028496e..8c7ae4b192e 100644
--- a/mysql-test/main/order_by_sortkey.test
+++ b/mysql-test/main/order_by_sortkey.test
@@ -50,6 +50,7 @@ INSERT INTO tmp SELECT f1,f2 FROM t1;
INSERT INTO t1(f1,f2) SELECT * FROM tmp;
INSERT INTO tmp SELECT f1,f2 FROM t1;
INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+select count(*) from t1;
# Test when only sortkeys fits to memory
set sort_buffer_size= 32768;
@@ -57,8 +58,24 @@ set sort_buffer_size= 32768;
FLUSH STATUS;
SHOW SESSION STATUS LIKE 'Sort%';
+explain SELECT * FROM t1 ORDER BY f2 LIMIT 100;
SELECT * FROM t1 ORDER BY f2 LIMIT 100;
-SHOW SESSION STATUS LIKE 'Sort%';
+create table t2
+select * from information_schema.SESSION_STATUS
+where
+ variable_name like 'handler_read_rnd%' or
+ variable_name like 'Sort%';
+
+# Check that Sort_priority_queue_sorts is used
+select * from t2 where variable_name like 'Sort%';
+
+# Check that we scanned the whole table and did only LIMIT row lookups
+select * from t2 where variable_name='HANDLER_READ_RND';
+
+select
+ if(variable_value in (87701, 87802), 'OK', 'FAIL') as RES
+from t2 where variable_name='HANDLER_READ_RND_NEXT';
+
-DROP TABLE t1, tmp;
+DROP TABLE t1, tmp, t2;
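The rewritten check above freezes the status counters into a scratch table right after the query under test and then asserts on individual rows of that snapshot; this apparently lets the test read both the Sort_% and HANDLER_READ_RND% counters from the same window without later status queries disturbing them, while the IN (87701, 87802) check absorbs the small run-to-run variation in HANDLER_READ_RND_NEXT. The same pattern in isolation, with an illustrative table name (status_snap is not part of the patch):

FLUSH STATUS;
SELECT * FROM t1 ORDER BY f2 LIMIT 100;
create table status_snap
select * from information_schema.SESSION_STATUS
where variable_name like 'handler_read_rnd%' or variable_name like 'Sort%';
select * from status_snap where variable_name like 'Sort%';
select * from status_snap where variable_name = 'HANDLER_READ_RND';
drop table status_snap;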
diff --git a/mysql-test/main/outfile_loaddata.result b/mysql-test/main/outfile_loaddata.result
index 4356f8b113e..1449cb19453 100644
--- a/mysql-test/main/outfile_loaddata.result
+++ b/mysql-test/main/outfile_loaddata.result
@@ -124,19 +124,16 @@ ERROR 42000: Field separator argument is not what is expected; check the manual
# LOAD DATA rises error or has unpredictable result -- to be fixed later
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' FIELDS ENCLOSED BY 'ъ';
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' INTO TABLE t2 CHARACTER SET binary FIELDS ENCLOSED BY 'ъ';
ERROR 42000: Field separator argument is not what is expected; check the manual
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' FIELDS ESCAPED BY 'ъ';
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' INTO TABLE t2 CHARACTER SET binary FIELDS ESCAPED BY 'ъ';
ERROR 42000: Field separator argument is not what is expected; check the manual
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' FIELDS TERMINATED BY 'ъ';
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
##################################################
1ъABC-áâ÷ъDEF-ÂÃÄ
@@ -160,7 +157,6 @@ a b c
2 NULL NULL
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' LINES STARTING BY 'ъ';
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
##################################################
ъ1 ABC-áâ÷ DEF-ÂÃÄ
@@ -176,7 +172,6 @@ a b c
2 NULL NULL
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' LINES TERMINATED BY 'ъ';
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
##################################################
1 ABC-áâ÷ DEF-ÂÃÄъ2 \N \Nъ##################################################
diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result
index 50be9dc49c8..c0c459e57b4 100644
--- a/mysql-test/main/parser.result
+++ b/mysql-test/main/parser.result
@@ -764,11 +764,8 @@ SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1 FOR UPDATE;
# "INTO" clause tests
SELECT 1 FROM t1 INTO @var17727401;
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 FROM DUAL INTO @var17727401;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT 1 INTO @var17727401;
SELECT 1 INTO @var17727401 FROM t1;
Warnings:
@@ -784,7 +781,6 @@ Warnings:
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 FROM t1 WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 LIMIT 1 INTO @var17727401;
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 FROM t1 WHERE 1 INTO @var17727401 GROUP BY 1 HAVING 1 ORDER BY 1 LIMIT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GROUP BY 1 HAVING 1 ORDER BY 1 LIMIT 1' at line 1
@@ -804,7 +800,6 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INTO @var17727401) UNION (SELECT 1 FROM t1 INTO t1)' at line 1
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 INTO @var17727401;
Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 INTO @var17727401 FROM t1 PROCEDURE ANALYSE();
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE()' at line 1
diff --git a/mysql-test/main/partition.result b/mysql-test/main/partition.result
index 0d37d4cb168..ad4c3cf1ad3 100644
--- a/mysql-test/main/partition.result
+++ b/mysql-test/main/partition.result
@@ -2361,11 +2361,11 @@ b c
EXPLAIN
SELECT b, c FROM t1 WHERE b = 1 GROUP BY b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range bc bc 10 NULL 8 Using where; Using index for group-by
+1 SIMPLE t1 ref bc bc 5 const 23 Using where; Using index
EXPLAIN
SELECT b, c FROM t1 WHERE b = 1 or b=2 GROUP BY b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range bc bc 10 NULL 8 Using where; Using index for group-by
+1 SIMPLE t1 range bc bc 5 NULL 23 Using where; Using index
DROP TABLE t1;
#
# Bug #45807: crash accessing partitioned table and sql_mode
diff --git a/mysql-test/main/partition_exchange.result b/mysql-test/main/partition_exchange.result
index b7c9be3480e..f68267c0f55 100644
--- a/mysql-test/main/partition_exchange.result
+++ b/mysql-test/main/partition_exchange.result
@@ -1231,7 +1231,7 @@ DROP TABLE t, t2, tp;
# failed during EXCHANGE PARTITION with different TABLESPACE.
#
CREATE TABLE t1 (a VARCHAR(200)) PARTITION BY KEY(a) partitions 10;
-ALTER TABLE t1 ADD PARTITION (PARTITION pm TABLESPACE = `innodb_file_per_table`);
+ALTER TABLE t1 ADD PARTITION (PARTITION pm);
CREATE TABLE t2 like t1;
ALTER TABLE t2 REMOVE PARTITIONING;
ALTER TABLE t1 EXCHANGE PARTITION pm WITH TABLE t2;
diff --git a/mysql-test/main/partition_exchange.test b/mysql-test/main/partition_exchange.test
index 82ccc29e00e..e35d8e1f9d1 100644
--- a/mysql-test/main/partition_exchange.test
+++ b/mysql-test/main/partition_exchange.test
@@ -525,7 +525,7 @@ DROP TABLE t, t2, tp;
--echo # failed during EXCHANGE PARTITION with different TABLESPACE.
--echo #
CREATE TABLE t1 (a VARCHAR(200)) PARTITION BY KEY(a) partitions 10;
-ALTER TABLE t1 ADD PARTITION (PARTITION pm TABLESPACE = `innodb_file_per_table`);
+ALTER TABLE t1 ADD PARTITION (PARTITION pm);
CREATE TABLE t2 like t1;
ALTER TABLE t2 REMOVE PARTITIONING;
# The following works as table spaces are not checked anymore
diff --git a/mysql-test/main/partition_explicit_prune.result b/mysql-test/main/partition_explicit_prune.result
index 07af2d58a42..f4436c5040c 100644
--- a/mysql-test/main/partition_explicit_prune.result
+++ b/mysql-test/main/partition_explicit_prune.result
@@ -470,7 +470,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE TableAlias p0-9_subp3 index NULL b 71 NULL 3 Using index
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 PARTITION (`p10-99`);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p10-99_subp4,p10-99_subp5 index NULL PRIMARY 4 NULL 2 Using index
+1 SIMPLE t1 p10-99_subp4,p10-99_subp5 index NULL b 71 NULL 2 Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = 1000000;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
@@ -693,8 +693,6 @@ a b
-21 REPLACEd by REPLACE
FLUSH STATUS;
SELECT * FROM t1 PARTITION (pNeg, `p10-99`) INTO OUTFILE 'loadtest.txt';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
diff --git a/mysql-test/main/partition_innodb2.result b/mysql-test/main/partition_innodb2.result
new file mode 100644
index 00000000000..4476eb91447
--- /dev/null
+++ b/mysql-test/main/partition_innodb2.result
@@ -0,0 +1,24 @@
+#
+# MDEV-30442: Assertion `!m_innodb' failed in ha_partition::cmp_ref on MULTI-DELETE
+#
+create table t1 (a int) engine=innodb;
+insert into t1 values (1),(2),(1),(2);
+create table t2 (
+a int,
+b int,
+key(a)
+) engine=innodb partition by list(a)
+(
+partition p0 values in (1),
+partition p1 values in (2),
+partition p2 values in (0,3,4,5,6,7,8,9)
+);
+insert into t2 select
+mod(seq, 10), seq from seq_1_to_50;
+explain
+delete t1, t2 from t1, t2 where t1.a=t2.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using where
+1 SIMPLE t2 ref a a 5 test.t1.a 1
+delete t1, t2 from t1, t2 where t1.a=t2.a;
+drop table t1,t2;
diff --git a/mysql-test/main/partition_innodb2.test b/mysql-test/main/partition_innodb2.test
new file mode 100644
index 00000000000..7581c5db4ca
--- /dev/null
+++ b/mysql-test/main/partition_innodb2.test
@@ -0,0 +1,30 @@
+--source include/not_embedded.inc
+--source include/have_partition.inc
+--source include/have_innodb.inc
+--source include/have_sequence.inc
+
+--echo #
+--echo # MDEV-30442: Assertion `!m_innodb' failed in ha_partition::cmp_ref on MULTI-DELETE
+--echo #
+create table t1 (a int) engine=innodb;
+insert into t1 values (1),(2),(1),(2);
+
+create table t2 (
+ a int,
+ b int,
+ key(a)
+) engine=innodb partition by list(a)
+(
+ partition p0 values in (1),
+ partition p1 values in (2),
+ partition p2 values in (0,3,4,5,6,7,8,9)
+);
+
+insert into t2 select
+ mod(seq, 10), seq from seq_1_to_50;
+
+explain
+delete t1, t2 from t1, t2 where t1.a=t2.a;
+delete t1, t2 from t1, t2 where t1.a=t2.a;
+
+drop table t1,t2;
diff --git a/mysql-test/main/partition_innodb_plugin.result b/mysql-test/main/partition_innodb_plugin.result
index 8211f0aac89..25b69212e5b 100644
--- a/mysql-test/main/partition_innodb_plugin.result
+++ b/mysql-test/main/partition_innodb_plugin.result
@@ -42,6 +42,8 @@ SET @old_innodb_file_per_table = @@global.innodb_file_per_table;
SET @old_innodb_strict_mode = @@global.innodb_strict_mode;
SET @@global.innodb_file_per_table = ON,
@@global.innodb_strict_mode = ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
connect con1,localhost,root,,;
CREATE TABLE t1 (id INT NOT NULL
PRIMARY KEY,
@@ -62,6 +64,8 @@ t1 CREATE TABLE `t1` (
PARTITION BY HASH (`id`)
PARTITIONS 1
SET GLOBAL innodb_file_per_table = OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
disconnect con1;
connect con2,localhost,root,,;
LOCK TABLE t1 WRITE;
@@ -104,6 +108,8 @@ disconnect con2;
connection default;
SET @@global.innodb_strict_mode = @old_innodb_strict_mode;
SET @@global.innodb_file_per_table = @old_innodb_file_per_table;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SET @save_detect= @@GLOBAL.innodb_deadlock_detect;
SET @save_report= @@GLOBAL.innodb_deadlock_report;
SET GLOBAL innodb_deadlock_detect=ON;
diff --git a/mysql-test/main/partition_mrr_aria.result b/mysql-test/main/partition_mrr_aria.result
index 7ff5c9b63ed..c7983007281 100644
--- a/mysql-test/main/partition_mrr_aria.result
+++ b/mysql-test/main/partition_mrr_aria.result
@@ -130,7 +130,7 @@ set optimizer_switch='mrr=on';
explain extended select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = t2.a-1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range idx idx 5 NULL 2 100.00 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 100.00 Using index condition(BKA); Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 25.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t0`.`tp` AS `tp`,`test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t0`.`c` AS `c`,`test`.`t2`.`a` AS `a` from `test`.`t0` join `test`.`t2` where `test`.`t0`.`a` = `test`.`t2`.`a` and `test`.`t2`.`a` in (3,4) and `test`.`t0`.`b` / 10 = `test`.`t2`.`a` - 1
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = t2.a-1;
@@ -164,7 +164,7 @@ tp a b c a
explain extended select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range idx idx 5 NULL 2 100.00 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 100.00 Using index condition; Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 25.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t0`.`tp` AS `tp`,`test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t0`.`c` AS `c`,`test`.`t2`.`a` AS `a` from `test`.`t0` join `test`.`t2` where `test`.`t0`.`a` = `test`.`t2`.`a` and `test`.`t2`.`a` in (3,4) and `test`.`t0`.`b` / 10 = 4
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
diff --git a/mysql-test/main/partition_mrr_innodb.result b/mysql-test/main/partition_mrr_innodb.result
index 98819021a6d..7d91fafef7c 100644
--- a/mysql-test/main/partition_mrr_innodb.result
+++ b/mysql-test/main/partition_mrr_innodb.result
@@ -130,7 +130,7 @@ set optimizer_switch='mrr=on';
explain extended select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = t2.a-1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range idx idx 5 NULL 2 100.00 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 100.00 Using index condition(BKA); Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 25.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t0`.`tp` AS `tp`,`test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t0`.`c` AS `c`,`test`.`t2`.`a` AS `a` from `test`.`t0` join `test`.`t2` where `test`.`t0`.`a` = `test`.`t2`.`a` and `test`.`t2`.`a` in (3,4) and `test`.`t0`.`b` / 10 = `test`.`t2`.`a` - 1
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = t2.a-1;
@@ -164,7 +164,7 @@ tp a b c a
explain extended select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range idx idx 5 NULL 2 100.00 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 100.00 Using index condition; Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 25.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t0`.`tp` AS `tp`,`test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t0`.`c` AS `c`,`test`.`t2`.`a` AS `a` from `test`.`t0` join `test`.`t2` where `test`.`t0`.`a` = `test`.`t2`.`a` and `test`.`t2`.`a` in (3,4) and `test`.`t0`.`b` / 10 = 4
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
diff --git a/mysql-test/main/partition_mrr_myisam.result b/mysql-test/main/partition_mrr_myisam.result
index c3ce2935417..9155851a60a 100644
--- a/mysql-test/main/partition_mrr_myisam.result
+++ b/mysql-test/main/partition_mrr_myisam.result
@@ -130,7 +130,7 @@ set optimizer_switch='mrr=on';
explain extended select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = t2.a-1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range idx idx 5 NULL 2 100.00 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 100.00 Using index condition(BKA); Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 25.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t0`.`tp` AS `tp`,`test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t0`.`c` AS `c`,`test`.`t2`.`a` AS `a` from `test`.`t0` join `test`.`t2` where `test`.`t0`.`a` = `test`.`t2`.`a` and `test`.`t2`.`a` in (3,4) and `test`.`t0`.`b` / 10 = `test`.`t2`.`a` - 1
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = t2.a-1;
@@ -164,7 +164,7 @@ tp a b c a
explain extended select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range idx idx 5 NULL 2 100.00 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 100.00 Using index condition; Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 25.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t0`.`tp` AS `tp`,`test`.`t0`.`a` AS `a`,`test`.`t0`.`b` AS `b`,`test`.`t0`.`c` AS `c`,`test`.`t2`.`a` AS `a` from `test`.`t0` join `test`.`t2` where `test`.`t0`.`a` = `test`.`t2`.`a` and `test`.`t2`.`a` in (3,4) and `test`.`t0`.`b` / 10 = 4
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
@@ -288,7 +288,7 @@ explain
select * from t0,t2 where t2.a in (3,4) and t0.a=t2.a and (t0.b / 10) = 4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range idx idx 5 NULL 2 Using where; Using index
-1 SIMPLE t0 ref idx idx 5 test.t2.a 12 Using index condition; Using join buffer (flat, BKA join); Rowid-ordered scan
+1 SIMPLE t0 ALL idx NULL NULL NULL 50 Using where; Using join buffer (flat, BNL join)
# This will use "Using index condition(BKA)"
explain
select * from t1,t2 where t2.a in (3,4) and t1.a=t2.a and (t1.b / 10) = 4;
diff --git a/mysql-test/main/partition_pruning.result b/mysql-test/main/partition_pruning.result
index ec0cb144a51..eef1df4095f 100644
--- a/mysql-test/main/partition_pruning.result
+++ b/mysql-test/main/partition_pruning.result
@@ -15,13 +15,13 @@ PARTITION max VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (-1),(0),(1),(2),(3),(4),(5),(6),(7),(8);
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
-# # # # # # # # # 3 #
+# # # # range # # # # 3 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-# # # # # # # # # 8 #
+# # # # index # # # # 10 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
-# # # # # # # # # 3 #
+# # # # range # # # # 3 #
DROP TABLE t1;
#
# Bug#49742: Partition Pruning not working correctly for RANGE
@@ -92,7 +92,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5 range PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5 index PRIMARY PRIMARY 4 NULL 7 Using where; Using index
SELECT * FROM t1 WHERE a < 7;
a
-1
@@ -105,7 +105,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a <= 1;
a
-1
@@ -155,7 +155,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 5;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5 range PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5 index PRIMARY PRIMARY 4 NULL 7 Using where; Using index
SELECT * FROM t1 WHERE a <= 6;
a
-1
@@ -168,7 +168,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a <= 7;
a
-1
@@ -182,7 +182,7 @@ a
7
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a = 1;
a
1
@@ -237,7 +237,7 @@ a
8
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 8 Using where; Using index
SELECT * FROM t1 WHERE a >= 2;
a
2
@@ -424,7 +424,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a <= 1;
a
-1
@@ -474,7 +474,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 5;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a <= 6;
a
-1
@@ -487,7 +487,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a = 1;
a
1
@@ -2677,12 +2677,12 @@ select * from t1 X, t1 Y
where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE X p1,p2 range a,b a 4 NULL 4 Using where
-1 SIMPLE Y p2,p3 ref a,b b 4 test.X.b 2 Using where
+1 SIMPLE Y p2,p3 ref a,b b 4 test.X.b 1 Using where
explain partitions
select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE X p1,p2 range a a 4 NULL 4 Using where
-1 SIMPLE Y p1,p2 ref a a 4 test.X.a 2
+1 SIMPLE Y p1,p2 ref a a 4 test.X.a 1
drop table t1;
create table t1 (a int) partition by hash(a) partitions 20;
insert into t1 values (1),(2),(3);
@@ -3465,8 +3465,8 @@ select * from t1
where company_id = 1000
and dept_id in (select dept_id from t2 where COMPANY_ID = 1000);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 p_1000 ref PRIMARY PRIMARY 8 const 3 Using index
-1 PRIMARY t1 p_1000 ALL PRIMARY NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 p_1000 ref PRIMARY PRIMARY 8 const 6 Using where
+1 PRIMARY t2 p_1000 eq_ref PRIMARY PRIMARY 16 const,test.t1.dept_id 1 Using index
drop table t1,t2;
#
# MDEV-9505: Valgrind failure in SEL_ARG::store_min,find_used_partitions,...
diff --git a/mysql-test/main/partition_pruning.test b/mysql-test/main/partition_pruning.test
index d59f52be313..78924d27550 100644
--- a/mysql-test/main/partition_pruning.test
+++ b/mysql-test/main/partition_pruning.test
@@ -25,11 +25,11 @@ PARTITION max VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (-1),(0),(1),(2),(3),(4),(5),(6),(7),(8);
---replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 11 #
+--replace_column 1 # 2 # 3 # 4 # 6 # 7 # 8 # 9 # 11 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 1;
---replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 11 #
+--replace_column 1 # 2 # 3 # 4 # 6 # 7 # 8 # 9 # 11 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
---replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 11 #
+--replace_column 1 # 2 # 3 # 4 # 6 # 7 # 8 # 9 # 11 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 1;
DROP TABLE t1;
diff --git a/mysql-test/main/partition_range.result b/mysql-test/main/partition_range.result
index 9cefe83e1e2..be1689b218e 100644
--- a/mysql-test/main/partition_range.result
+++ b/mysql-test/main/partition_range.result
@@ -4,23 +4,26 @@ drop table if exists t1, t2;
#
CREATE TABLE t1 (a INT,b INT,KEY a (a,b));
INSERT INTO `t1` VALUES (0,580092),(3000,894076),(4000,805483),(4000,913540),(6000,611137),(8000,171602),(9000,599495),(9000,746305),(10000,272829),(10000,847519),(12000,258869),(12000,929028),(13000,288970),(15000,20971),(15000,105839),(16000,788272),(17000,76914),(18000,827274),(19000,802258),(20000,123677),(20000,587729),(22000,701449),(25000,31565),(25000,230782),(25000,442887),(25000,733139),(25000,851020);
+SELECT COUNT(*) from t1 where a IN (10000, 1000000, 3000);
+COUNT(*)
+3
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10000, 1000000, 3000) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 1 Using where; Using index for group-by
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index for group-by
alter table t1 partition by hash(a) partitions 1;
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10000, 1000000, 3000) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 1 Using where; Using index for group-by
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index for group-by
alter table t1 remove partitioning;
insert into t1 (a,b) select seq,seq from seq_4001_to_4100;
insert into t1 (a,b) select seq,seq from seq_10001_to_10100;
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10000, 1000000, 3000) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index for group-by
alter table t1 partition by hash(a) partitions 1;
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10000, 1000000, 3000) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index for group-by
DROP TABLE t1;
create table t1 (a DATETIME)
partition by range (TO_DAYS(a))
@@ -955,6 +958,11 @@ CREATE TABLE t1 (
a INT,
b INT,
KEY ( a, b )
+);
+CREATE TABLE t1_part (
+a INT,
+b INT,
+KEY ( a, b )
) PARTITION BY HASH (a) PARTITIONS 1;
CREATE TABLE t2 (
a INT,
@@ -966,36 +974,68 @@ INSERT INTO t1 SELECT a + 5, b + 5 FROM t1;
INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
+INSERT INTO t1 values(10,0),(10,1),(10,2),(100,0),(100,1);
+select count(*) from t1;
+count(*)
+85
+select count(*) from t1 where a=10;
+count(*)
+4
+select count(*) from t1 where a=100;
+count(*)
+2
+INSERT INTO t1_part SELECT * FROM t1;
INSERT INTO t2 SELECT * FROM t1;
-ANALYZE TABLE t1,t2;
+ANALYZE TABLE t1_part,t2;
Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
+test.t1_part analyze status Engine-independent statistics collected
+test.t1_part analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
# plans should be identical
-EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
+EXPLAIN SELECT a, MAX(b) FROM t1_part WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
+1 SIMPLE t1_part range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index
+1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
FLUSH status;
-SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
+SELECT a, MAX(b) FROM t1_part WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
10 10
-# Should be no more than 4 reads.
-SHOW status LIKE 'handler_read_key';
+100 1
+SHOW status LIKE 'handler_read%';
Variable_name Value
-Handler_read_key 2
+Handler_read_first 0
+Handler_read_key 6
+Handler_read_last 1
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_retry 0
+Handler_read_rnd 0
+Handler_read_rnd_deleted 0
+Handler_read_rnd_next 0
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
10 10
-# Should be no more than 4 reads.
-SHOW status LIKE 'handler_read_key';
+100 1
+SHOW status LIKE 'handler_read%';
Variable_name Value
-Handler_read_key 2
+Handler_read_first 0
+Handler_read_key 6
+Handler_read_last 1
+Handler_read_next 0
+Handler_read_prev 0
+Handler_read_retry 0
+Handler_read_rnd 0
+Handler_read_rnd_deleted 0
+Handler_read_rnd_next 0
+insert into t2 select 100,seq from seq_1_to_100;
+EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
+DROP TABLE t1,t1_part,t2;
#
# MDEV-18501 Partition pruning doesn't work for historical queries
#
@@ -1023,7 +1063,7 @@ d
select * from t1 partition (p1);
d
2000-01-01 00:00:01.000000
-DROP TABLE t1, t2;
+DROP TABLE t1;
#
# MDEV-21195 INSERT chooses wrong partition for RANGE partitioning by DECIMAL column
#
diff --git a/mysql-test/main/partition_range.test b/mysql-test/main/partition_range.test
index f56851217cf..a7073122bbb 100644
--- a/mysql-test/main/partition_range.test
+++ b/mysql-test/main/partition_range.test
@@ -17,6 +17,7 @@ drop table if exists t1, t2;
CREATE TABLE t1 (a INT,b INT,KEY a (a,b));
INSERT INTO `t1` VALUES (0,580092),(3000,894076),(4000,805483),(4000,913540),(6000,611137),(8000,171602),(9000,599495),(9000,746305),(10000,272829),(10000,847519),(12000,258869),(12000,929028),(13000,288970),(15000,20971),(15000,105839),(16000,788272),(17000,76914),(18000,827274),(19000,802258),(20000,123677),(20000,587729),(22000,701449),(25000,31565),(25000,230782),(25000,442887),(25000,733139),(25000,851020);
+SELECT COUNT(*) from t1 where a IN (10000, 1000000, 3000);
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10000, 1000000, 3000) GROUP BY a;
alter table t1 partition by hash(a) partitions 1;
@@ -941,10 +942,17 @@ drop table t1, t2;
--echo # Bug#50939: Loose Index Scan unduly relies on engine to remember range
--echo # endpoints
--echo #
+
CREATE TABLE t1 (
a INT,
b INT,
KEY ( a, b )
+);
+
+CREATE TABLE t1_part (
+ a INT,
+ b INT,
+ KEY ( a, b )
) PARTITION BY HASH (a) PARTITIONS 1;
CREATE TABLE t2 (
@@ -959,24 +967,35 @@ INSERT INTO t1 SELECT a + 5, b + 5 FROM t1;
INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
+INSERT INTO t1 values(10,0),(10,1),(10,2),(100,0),(100,1);
+select count(*) from t1;
+select count(*) from t1 where a=10;
+select count(*) from t1 where a=100;
+INSERT INTO t1_part SELECT * FROM t1;
INSERT INTO t2 SELECT * FROM t1;
-ANALYZE TABLE t1,t2;
+ANALYZE TABLE t1_part,t2;
--echo # plans should be identical
-EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
+EXPLAIN SELECT a, MAX(b) FROM t1_part WHERE a IN (10,100) GROUP BY a;
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
+# view protocol would change the handler counters
+--disable_view_protocol
FLUSH status;
-SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
---echo # Should be no more than 4 reads.
-SHOW status LIKE 'handler_read_key';
+SELECT a, MAX(b) FROM t1_part WHERE a IN (10, 100) GROUP BY a;
+SHOW status LIKE 'handler_read%';
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
---echo # Should be no more than 4 reads.
-SHOW status LIKE 'handler_read_key';
+SHOW status LIKE 'handler_read%';
+
+--enable_view_protocol
+insert into t2 select 100,seq from seq_1_to_100;
+EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
+
+DROP TABLE t1,t1_part,t2;
--echo #
--echo # MDEV-18501 Partition pruning doesn't work for historical queries
@@ -1005,7 +1024,7 @@ insert into t1 values
select * from t1 partition (p0);
select * from t1 partition (p1);
-DROP TABLE t1, t2;
+DROP TABLE t1;
--echo #
--echo # MDEV-21195 INSERT chooses wrong partition for RANGE partitioning by DECIMAL column
diff --git a/mysql-test/main/percona_nonflushing_analyze_debug.result b/mysql-test/main/percona_nonflushing_analyze_debug.result
index 78da085f26f..c3388fa42ed 100644
--- a/mysql-test/main/percona_nonflushing_analyze_debug.result
+++ b/mysql-test/main/percona_nonflushing_analyze_debug.result
@@ -1,7 +1,7 @@
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
connect con1,localhost,root;
-SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SET DEBUG_SYNC="handler_rnd_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
diff --git a/mysql-test/main/plugin_auth.test b/mysql-test/main/plugin_auth.test
index 1c471cab9b5..dfc0572afd5 100644
--- a/mysql-test/main/plugin_auth.test
+++ b/mysql-test/main/plugin_auth.test
@@ -506,7 +506,7 @@ SELECT IS_NULLABLE, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS
COLUMN_NAME IN ('plugin', 'authentication_string')
ORDER BY COLUMN_NAME;
let $datadir= `select @@datadir`;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
drop table mysql.global_priv;
rename table mysql.global_priv_bak to mysql.global_priv;
diff --git a/mysql-test/main/pool_of_threads.result b/mysql-test/main/pool_of_threads.result
index 91ad7ab098f..718cb7dc8ce 100644
--- a/mysql-test/main/pool_of_threads.result
+++ b/mysql-test/main/pool_of_threads.result
@@ -1,5 +1,6 @@
SET optimizer_switch='outer_join_with_cache=off';
drop table if exists t1,t2,t3,t4;
+set @@default_storage_engine="aria";
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
@@ -600,6 +601,9 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some tests with ORDER BY and LIMIT
+#
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -1289,7 +1293,7 @@ companynr tinyint(2) unsigned zerofill NOT NULL default '00',
companyname char(30) NOT NULL default '',
PRIMARY KEY (companynr),
UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
+) ENGINE=aria MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
select STRAIGHT_JOIN t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
companynr companyname
00 Unknown
@@ -1379,6 +1383,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
@@ -1393,15 +1400,15 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 and companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1417,11 +1424,11 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index d5e501b06ef..d60fa28fc96 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -244,8 +244,6 @@ prepare stmt1 from "insert into t1 select i from t1";
execute stmt1;
execute stmt1;
prepare stmt1 from "select * from t1 into outfile '<MYSQLTEST_VARDIR>/tmp/f1.txt'";
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt1;
deallocate prepare stmt1;
drop table t1;
@@ -5554,13 +5552,11 @@ CREATE TABLE t2 (c2 INT) ENGINE=MyISAM;
CREATE TABLE t3 (c3 INT) ENGINE=MyISAM;
EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 );
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
-2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
PREPARE stmt FROM "EXPLAIN EXTENDED UPDATE t3 SET c3 = ( SELECT COUNT(d1.c1) FROM ( SELECT a11.c1 FROM t1 AS a11 STRAIGHT_JOIN t2 AS a21 ON a21.c2 = a11.c1 JOIN t1 AS a12 ON a12.c1 = a11.c1 ) d1 )";
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 ALL NULL NULL NULL NULL 0 100.00
-2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DEALLOCATE PREPARE stmt;
DROP TABLE t1, t2, t3;
#
diff --git a/mysql-test/main/ps_1general.result b/mysql-test/main/ps_1general.result
index ca2447b6b26..b71057248b6 100644
--- a/mysql-test/main/ps_1general.result
+++ b/mysql-test/main/ps_1general.result
@@ -451,7 +451,7 @@ def Extra 253 255 14 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using filesort
SET @arg00=1 ;
-prepare stmt1 from ' explain select a from t1 where a > ? order by b ';
+prepare stmt1 from ' explain select a from t1 force index (primary) where a > ? order by b ';
execute stmt1 using @arg00;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def id 8 3 1 Y 32928 0 63
diff --git a/mysql-test/main/ps_1general.test b/mysql-test/main/ps_1general.test
index 20f2ad019f4..c98fc4a5619 100644
--- a/mysql-test/main/ps_1general.test
+++ b/mysql-test/main/ps_1general.test
@@ -437,8 +437,10 @@ prepare stmt3 from ' unlock tables ' ;
## Load/Unload table contents
--let $datafile = $MYSQLTEST_VARDIR/tmp/data.txt
+--disable_warnings
--error 0,1
--remove_file $datafile
+--enable_warnings
--replace_result $MYSQLTEST_VARDIR <MYSQLTEST_VARDIR>
eval prepare stmt1 from ' load data infile ''$datafile''
@@ -497,7 +499,7 @@ prepare stmt1 from ' explain select a from t1 order by b ';
execute stmt1;
--disable_metadata
SET @arg00=1 ;
-prepare stmt1 from ' explain select a from t1 where a > ? order by b ';
+prepare stmt1 from ' explain select a from t1 force index (primary) where a > ? order by b ';
--enable_metadata
--replace_result 4096 4_OR_8_K 8192 4_OR_8_K
execute stmt1 using @arg00;
diff --git a/mysql-test/main/ps_ddl.result b/mysql-test/main/ps_ddl.result
index dcbb6982702..00243b93acc 100644
--- a/mysql-test/main/ps_ddl.result
+++ b/mysql-test/main/ps_ddl.result
@@ -20,8 +20,6 @@ else
select '' as "SUCCESS";
end if;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set @reprepare_count= 0;
flush status;
=====================================================================
@@ -1075,8 +1073,6 @@ call p1(x);
return x;
end|
create procedure p1(out x int) select max(a) from t1 into x;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
prepare stmt from "select * from v1";
execute stmt;
f1()
@@ -1089,8 +1085,6 @@ SUCCESS
drop procedure p1;
create procedure p1(out x int) select max(a) from t2 into x;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# XXX: used to be a bug. The prelocked list was not invalidated
# and we kept opening table t1, whereas the procedure
# is now referring to table t2
diff --git a/mysql-test/main/ps_ddl1.result b/mysql-test/main/ps_ddl1.result
index 5178ee64f16..667cbed8a7a 100644
--- a/mysql-test/main/ps_ddl1.result
+++ b/mysql-test/main/ps_ddl1.result
@@ -20,8 +20,6 @@ else
select '' as "SUCCESS";
end if;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set @reprepare_count= 0;
flush status;
drop table if exists t1;
diff --git a/mysql-test/main/query_cache.result b/mysql-test/main/query_cache.result
index f78a6ccc388..e73f219e1b0 100644
--- a/mysql-test/main/query_cache.result
+++ b/mysql-test/main/query_cache.result
@@ -646,13 +646,9 @@ show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
select * from t1 into outfile "query_cache.out.file";
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select * from t1 into outfile "query_cache.out.file";
ERROR HY000: File 'query_cache.out.file' already exists
select * from t1 limit 1 into dumpfile "query_cache.dump.file";
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
@@ -1105,8 +1101,6 @@ Declare var1 int;
select max(a) from t1 into var1;
return var1;
end//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure `p1`()
begin
select a, f1() from t1;
diff --git a/mysql-test/main/range.result b/mysql-test/main/range.result
index 6ff4f409666..a352252c616 100644
--- a/mysql-test/main/range.result
+++ b/mysql-test/main/range.result
@@ -281,7 +281,7 @@ INSERT INTO t1 VALUES
(33,5),(33,5),(33,5),(33,5),(34,5),(35,5);
EXPLAIN SELECT * FROM t1 WHERE a IN(1,2) AND b=5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 2 (41%) Using index condition; Using where; Using rowid filter
+1 SIMPLE t1 range a,b a 5 NULL 2 Using index condition; Using where
SELECT * FROM t1 WHERE a IN(1,2) AND b=5;
a b
DROP TABLE t1;
@@ -309,6 +309,9 @@ a b
15 1
47 1
DROP TABLE t1;
+#
+# Test of problem with IN on many different keyparts. (Bug #4157)
+#
CREATE TABLE t1 (
id int( 11 ) unsigned NOT NULL AUTO_INCREMENT ,
line int( 5 ) unsigned NOT NULL default '0',
@@ -325,10 +328,17 @@ KEY `LINES` ( owner, tableid, content, id ) ,
KEY recount( owner, line )
) ENGINE = MYISAM;
INSERT into t1 (owner,id,columnid,line) values (11,15,15,1),(11,13,13,5);
+INSERT into t1 (owner,id,columnid,line) select 11,seq+20,seq,seq from seq_1_to_100;
+explain SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref PRIMARY,menu,COLUMN,LINES,recount COLUMN 4 const 11 Using index condition
SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
id columnid tableid content showid line ordinal
-15 15 1 188 1 1 0
13 13 1 188 1 5 0
+15 15 1 188 1 1 0
+33 13 1 188 1 13 0
+34 14 1 188 1 14 0
+35 15 1 188 1 15 0
drop table t1;
create table t1 (id int(10) primary key);
insert into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -723,7 +733,7 @@ WHERE
v.oxrootid ='d8c4177d09f8b11f5.52725521' AND
s.oxleft > v.oxleft AND s.oxleft < v.oxright;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE v ALL OXLEFT,OXRIGHT,OXROOTID NULL NULL NULL 12 Using where
+1 SIMPLE v ref OXLEFT,OXRIGHT,OXROOTID OXROOTID 34 const 6 Using index condition
1 SIMPLE s ALL OXLEFT NULL NULL NULL 12 Range checked for each record (index map: 0x4)
SELECT s.oxid FROM t1 v, t1 s
WHERE
@@ -1236,14 +1246,16 @@ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, filler char(100));
insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A,
t1 B, t1 C where A.a < 5;
-insert into t2 select 1000, b, 'filler' from t2;
+insert into t2 select 1000, b, 'filler' from t2 limit 50;
+select count(*) from t2;
+count(*)
+550
alter table t2 add index (a,b);
-select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z;
-Z
-In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)
+# In the following EXPLAIN the access method should be ref, #rows~=50
+# (and not 2) when we are not using rowid-ordered scans
explain select * from t2 where a=1000 and b<11;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref a a 5 const 503 Using index condition
+1 SIMPLE t2 range a a 10 NULL 63 Using index condition
drop table t1, t2;
CREATE TABLE t1( a INT, b INT, KEY( a, b ) );
CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
@@ -2412,6 +2424,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2421,7 +2434,9 @@ EXPLAIN
"key": "idx",
"key_length": "10",
"used_key_parts": ["a", "b"],
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t1.b) in (<cache>((2,3)),<cache>((3,3)),<cache>((8,8)),<cache>((7,7)))"
}
@@ -2476,6 +2491,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2485,7 +2501,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t1.b + t1.a) in (<cache>((4,9)),<cache>((8,8)),<cache>((7,7)))"
}
@@ -2506,6 +2524,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2515,7 +2534,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t1.b) in ((4,t1.a - 1),(8,t1.a + 8),(7,t1.a + 7))"
}
@@ -2542,7 +2563,7 @@ insert into t2 values
explain select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(2,2));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range|filter idx1,idx2 idx1|idx2 5|5 NULL 3 (60%) Using index condition; Using where; Using rowid filter
+1 SIMPLE t2 range idx1,idx2 idx1 5 NULL 3 Using index condition; Using where
1 SIMPLE t1 ref idx idx 5 test.t2.d 8
explain format=json select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(2,2));
@@ -2550,6 +2571,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2559,15 +2581,9 @@ EXPLAIN
"key": "idx1",
"key_length": "5",
"used_key_parts": ["d"],
- "rowid_filter": {
- "range": {
- "key": "idx2",
- "used_key_parts": ["e"]
- },
- "rows": 12,
- "selectivity_pct": 60
- },
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 60,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((2,2)))"
@@ -2582,7 +2598,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 1.8,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2636,6 +2654,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2653,7 +2672,9 @@ EXPLAIN
"rows": 15,
"selectivity_pct": 14.42307692
},
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 14.42307663,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
@@ -2668,8 +2689,10 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 1.153846154,
"rows": 8,
- "filtered": 100
+ "cost": "COST_REPLACED",
+ "filtered": 73.17073059
}
}
]
@@ -2678,60 +2701,60 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
a b c d e f
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuuw 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-7 7 xxxyy 7 7 h
+3 3 zzza 3 3 i
+3 3 zzzz 3 3 i
+3 3 zzzz 3 3 i
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
prepare stmt from "select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1";
execute stmt;
a b c d e f
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuuw 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-7 7 xxxyy 7 7 h
+3 3 zzza 3 3 i
+3 3 zzzz 3 3 i
+3 3 zzzz 3 3 i
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
execute stmt;
a b c d e f
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuuw 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-7 7 xxxyy 7 7 h
+3 3 zzza 3 3 i
+3 3 zzzz 3 3 i
+3 3 zzzz 3 3 i
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
deallocate prepare stmt;
insert into t1 select * from t1;
# join order: (t2,t1) with ref access of t1
@@ -2747,6 +2770,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2764,7 +2788,9 @@ EXPLAIN
"rows": 7,
"selectivity_pct": 6.730769231
},
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 6.730769157,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
@@ -2779,7 +2805,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2789,14 +2817,14 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
a b c d e f
-7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
-7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-7 8 xxxxx 7 7 h
7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxx 7 7 h
alter table t2 drop index idx1, drop index idx2, add index idx3(d,e);
# join order: (t2,t1) with ref access of t1
# range access to t2 by 2-component keys for index idx3
@@ -2811,6 +2839,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2820,7 +2849,9 @@ EXPLAIN
"key": "idx3",
"key_length": "10",
"used_key_parts": ["d", "e"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
@@ -2835,7 +2866,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 5,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2866,6 +2899,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2875,7 +2909,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 15,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a is not null"
}
@@ -2889,7 +2925,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 15,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t2.e) in ((4,t1.a + 1),(7,t1.a + 1),(8,t1.a + 1)) and octet_length(t2.f) = 1"
}
@@ -2900,22 +2938,22 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1;
a b c d e f
-4 3 zyx 4 5 a
4 3 zya 4 5 a
-4 3 zyx 4 5 a
4 3 zya 4 5 a
-4 5 ww 4 5 a
+4 3 zyx 4 5 a
+4 3 zyx 4 5 a
4 5 wa 4 5 a
-4 5 ww 4 5 a
4 5 wa 4 5 a
-7 7 xxxyy 7 8 b
+4 5 ww 4 5 a
+4 5 ww 4 5 a
7 7 xxxya 7 8 b
-7 7 xxxyy 7 8 b
7 7 xxxya 7 8 b
-7 8 xxxxx 7 8 b
+7 7 xxxyy 7 8 b
+7 7 xxxyy 7 8 b
7 8 xxxxa 7 8 b
-7 8 xxxxx 7 8 b
7 8 xxxxa 7 8 b
+7 8 xxxxx 7 8 b
+7 8 xxxxx 7 8 b
# join order: (t1,t2) with ref access of t2
# no range access
explain select * from t1,t2
@@ -2929,13 +2967,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 144,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a is not null"
}
@@ -2949,7 +2990,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 144,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t2.e) in ((t2.e,t1.a + 1),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
}
@@ -2960,14 +3003,14 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1;
a b c d e f
-7 8 xxxxx 7 7 h
-7 7 xxxyy 7 7 h
-7 8 xxxxa 7 7 h
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxya 7 7 h
+7 7 xxxyy 7 7 h
7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-7 7 xxxya 7 7 h
+7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxx 7 7 h
# join order: (t1,t2) with ref access of t2
# range access to t1 by 1-component keys for index idx
explain select * from t1,t2
@@ -2983,6 +3026,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2992,7 +3036,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,2) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1"
@@ -3007,7 +3053,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 12,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "octet_length(t2.f) = 1"
}
@@ -3066,6 +3114,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -3088,7 +3137,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,1 + 1) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1"
@@ -3103,7 +3154,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 12,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "octet_length(t2.f) = 1"
}
@@ -3381,7 +3434,7 @@ insert into t2 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C where A.a +
# expected type=range, rows=1487 , reason=using index dives
analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198);
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE t1 range a a 5 NULL 1487 1199.00 100.00 100.00 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 2000 2000.00 74.35 59.95 Using where; Using index
insert into t2 values (200),(201);
# expected type=range, rows=201 , reason=using index statistics
analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,200,201);
diff --git a/mysql-test/main/range.test b/mysql-test/main/range.test
index c42670c1ed1..c2cc794b485 100644
--- a/mysql-test/main/range.test
+++ b/mysql-test/main/range.test
@@ -3,6 +3,8 @@
# Problem with range optimizer
#
--source include/have_innodb.inc
+--source include/have_sequence.inc
+
SET optimizer_use_condition_selectivity=4;
set @innodb_stats_persistent_save= @@innodb_stats_persistent;
@@ -263,9 +265,9 @@ WHERE
);
DROP TABLE t1;
-#
-# Test of problem with IN on many different keyparts. (Bug #4157)
-#
+--echo #
+--echo # Test of problem with IN on many different keyparts. (Bug #4157)
+--echo #
CREATE TABLE t1 (
id int( 11 ) unsigned NOT NULL AUTO_INCREMENT ,
@@ -284,7 +286,10 @@ KEY recount( owner, line )
) ENGINE = MYISAM;
INSERT into t1 (owner,id,columnid,line) values (11,15,15,1),(11,13,13,5);
+INSERT into t1 (owner,id,columnid,line) select 11,seq+20,seq,seq from seq_1_to_100;
+explain SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
+--sorted_result
SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
drop table t1;
@@ -1025,7 +1030,8 @@ create table t2 (a int, b int, filler char(100));
insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A,
t1 B, t1 C where A.a < 5;
-insert into t2 select 1000, b, 'filler' from t2;
+insert into t2 select 1000, b, 'filler' from t2 limit 50;
+select count(*) from t2;
alter table t2 add index (a,b);
# t2 values
# ( 1 , 10, 'filler')
@@ -1033,13 +1039,14 @@ alter table t2 add index (a,b);
# ( 3 , 10, 'filler')
# (... , 10, 'filler')
# ...
-# (1000, 10, 'filler') - 500 times
+# (1000, 10, 'filler') - 50 times
-# 500 rows, 1 row
+# 50 rows, 1 row
-select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z;
-explain select * from t2 where a=1000 and b<11;
+--echo # In the following EXPLAIN the access method should be ref, #rows~=50
+--echo # (and not 2) when we are not using rowid-ordered scans
+explain select * from t2 where a=1000 and b<11;
drop table t1, t2;
#
@@ -1913,6 +1920,7 @@ insert into t1 values
let $q1=
select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7));
eval explain $q1;
+--source include/explain-no-costs.inc
eval explain format=json $q1;
eval $q1;
eval prepare stmt from "$q1";
@@ -1924,6 +1932,7 @@ deallocate prepare stmt;
let $q2=
select * from t1 where (a,b+a) IN ((4,9),(8,8),(7,7));
eval explain $q2;
+--source include/explain-no-costs.inc
eval explain format=json $q2;
eval $q2;
@@ -1931,6 +1940,7 @@ eval $q2;
let $q3=
select * from t1 where (a,b) IN ((4,a-1),(8,a+8),(7,a+7));
eval explain $q3;
+--source include/explain-no-costs.inc
eval explain format=json $q3;
eval $q3;
@@ -1954,6 +1964,7 @@ let $q4=
select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(2,2));
eval explain $q4;
+--source include/explain-no-costs.inc
eval explain format=json $q4;
eval $q4;
@@ -1979,10 +1990,14 @@ let $q5=
select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
eval explain $q5;
+--source include/explain-no-costs.inc
eval explain format=json $q5;
+--sorted_result
eval $q5;
eval prepare stmt from "$q5";
+--sorted_result
execute stmt;
+--sorted_result
execute stmt;
deallocate prepare stmt;
@@ -1994,7 +2009,9 @@ let $q6=
select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
eval explain $q6;
+--source include/explain-no-costs.inc
eval explain format=json $q6;
+--sorted_result
eval $q6;
alter table t2 drop index idx1, drop index idx2, add index idx3(d,e);
@@ -2005,6 +2022,7 @@ let $q7=
select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
eval explain $q7;
+--source include/explain-no-costs.inc
eval explain format=json $q7;
eval $q7;
@@ -2014,7 +2032,9 @@ let $q8=
select * from t1,t2
where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1;
eval explain $q8;
+--source include/explain-no-costs.inc
eval explain format=json $q8;
+--sorted_result
eval $q8;
--echo # join order: (t1,t2) with ref access of t2
@@ -2023,7 +2043,9 @@ let $q9=
select * from t1,t2
where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1;
eval explain $q9;
+--source include/explain-no-costs.inc
eval explain format=json $q9;
+--sorted_result
eval $q9;
--echo # join order: (t1,t2) with ref access of t2
@@ -2033,6 +2055,7 @@ select * from t1,t2
where a = d and (a,2) in ((2,2),(7,7),(8,8)) and
length(c) = 1 and length(f) = 1;
eval explain $q10;
+--source include/explain-no-costs.inc
eval explain format=json $q10;
eval $q10;
eval prepare stmt from "$q10";
@@ -2053,6 +2076,7 @@ select * from t1,t2,t3
(a,v+1) in ((2,2),(7,7),(8,8)) and
length(c) = 1 and length(f) = 1;
eval explain $q11;
+--source include/explain-no-costs.inc
eval explain format=json $q11;
eval $q11;
diff --git a/mysql-test/main/range_aria_dbt3.result b/mysql-test/main/range_aria_dbt3.result
index f08a1b244f8..be3bc79c3a8 100644
--- a/mysql-test/main/range_aria_dbt3.result
+++ b/mysql-test/main/range_aria_dbt3.result
@@ -19,6 +19,16 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT COUNT(*) FROM lineitem WHERE l_orderkey BETWEEN 111 AND 262 OR ( l_orderkey BETWEEN 152 AND 672 AND l_linenumber BETWEEN 4 AND 9 );
COUNT(*)
293
+#
+# MDEV-30486 Table is not eliminated in bb-11.0
+#
+explain SELECT c_custkey, c_name AS currency2 FROM partsupp LEFT JOIN part ON ( p_partkey = ps_partkey ) JOIN supplier ON (s_suppkey = ps_suppkey) JOIN lineitem ON ( ps_suppkey = l_suppkey ) JOIN orders ON ( l_orderkey = o_orderkey ) JOIN customer ON ( o_custkey = c_custkey ) HAVING c_custkey > 150;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE supplier index PRIMARY PRIMARY 4 NULL 10 Using index
+1 SIMPLE partsupp ref i_ps_suppkey i_ps_suppkey 4 dbt3_s001.supplier.s_suppkey 16
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey 5 dbt3_s001.supplier.s_suppkey 100
+1 SIMPLE orders eq_ref PRIMARY,i_o_custkey PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+1 SIMPLE customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1
DROP DATABASE dbt3_s001;
#
# End of 10.5 tests
diff --git a/mysql-test/main/range_aria_dbt3.test b/mysql-test/main/range_aria_dbt3.test
index 141bf43885b..b373d5b999b 100644
--- a/mysql-test/main/range_aria_dbt3.test
+++ b/mysql-test/main/range_aria_dbt3.test
@@ -26,6 +26,12 @@ explain SELECT COUNT(*) FROM lineitem WHERE l_orderkey BETWEEN 111 AND 262 OR (
SELECT COUNT(*) FROM lineitem WHERE l_orderkey BETWEEN 111 AND 262 OR ( l_orderkey BETWEEN 152 AND 672 AND l_linenumber BETWEEN 4 AND 9 );
+--echo #
+--echo # MDEV-30486 Table is not eliminated in bb-11.0
+--echo #
+
+explain SELECT c_custkey, c_name AS currency2 FROM partsupp LEFT JOIN part ON ( p_partkey = ps_partkey ) JOIN supplier ON (s_suppkey = ps_suppkey) JOIN lineitem ON ( ps_suppkey = l_suppkey ) JOIN orders ON ( l_orderkey = o_orderkey ) JOIN customer ON ( o_custkey = c_custkey ) HAVING c_custkey > 150;
+
DROP DATABASE dbt3_s001;
--echo #
diff --git a/mysql-test/main/range_innodb.result b/mysql-test/main/range_innodb.result
index eddfbfd0d62..542d0eed294 100644
--- a/mysql-test/main/range_innodb.result
+++ b/mysql-test/main/range_innodb.result
@@ -53,9 +53,9 @@ set optimizer_switch='extended_keys=on';
explain
select pk, a, b from t1,t2,t3 where b >= d and pk < c and b = '0';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 2
-1 SIMPLE t1 ref PRIMARY,idx1,idx2 idx1 5 const 3 Using index condition
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ref PRIMARY,idx1,idx2 idx1 5 const #
+1 SIMPLE t2 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (incremental, BNL join)
select pk, a, b from t1,t2,t3 where b >= d and pk < c and b = '0';
pk a b
1 6 0
@@ -83,6 +83,7 @@ drop table t1,t2;
# MDEV-14440: Server crash in in handler::ha_external_lock or Assertion `inited==RND'
# failed in handler::ha_rnd_end upon SELECT from partitioned table
#
+call mtr.add_suppression("Got error .* when reading table");
set @optimizer_switch_save= @@optimizer_switch;
set optimizer_switch='index_merge_sort_intersection=off';
create table t0(a int);
diff --git a/mysql-test/main/range_innodb.test b/mysql-test/main/range_innodb.test
index 276b9cea08f..35511279910 100644
--- a/mysql-test/main/range_innodb.test
+++ b/mysql-test/main/range_innodb.test
@@ -58,6 +58,8 @@ insert into t3 values (3),(-1),(4);
set @save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=on';
+# InnoDB sometimes returns 4 and other times 5 records for t1
+--replace_column 9 #
explain
select pk, a, b from t1,t2,t3 where b >= d and pk < c and b = '0';
select pk, a, b from t1,t2,t3 where b >= d and pk < c and b = '0';
@@ -89,6 +91,8 @@ drop table t1,t2;
--echo # failed in handler::ha_rnd_end upon SELECT from partitioned table
--echo #
+call mtr.add_suppression("Got error .* when reading table");
+
set @optimizer_switch_save= @@optimizer_switch;
set optimizer_switch='index_merge_sort_intersection=off';
create table t0(a int);
diff --git a/mysql-test/main/range_interrupted-13751.result b/mysql-test/main/range_interrupted-13751.result
index 68610cdda8e..eadd32bffa0 100644
--- a/mysql-test/main/range_interrupted-13751.result
+++ b/mysql-test/main/range_interrupted-13751.result
@@ -1,11 +1,11 @@
CREATE TABLE t1 (i INT AUTO_INCREMENT, c VARCHAR(1), KEY(i), KEY(c,i)) ENGINE=MyISAM;
-INSERT INTO t1 (c) VALUES ('a'),('b'),('c'),('d');
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) select mid("abcdefgh", mod(seq,8)+1, 1) from seq_1_to_256;
+explain SELECT 1 FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
+WHERE alias1.c = alias2.c OR alias1.i <= 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE alias2 index c c 8 NULL 256 Using index
+1 SIMPLE alias3 index NULL i 4 NULL 256 Using index; Using join buffer (flat, BNL join)
+1 SIMPLE alias1 ALL i,c NULL NULL NULL 256 Range checked for each record (index map: 0x3)
set @old_dbug=@@session.debug_dbug;
SET debug_dbug="+d,kill_join_init_read_record";
SELECT 1 FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
diff --git a/mysql-test/main/range_interrupted-13751.test b/mysql-test/main/range_interrupted-13751.test
index b0793edeb9d..939f15b2145 100644
--- a/mysql-test/main/range_interrupted-13751.test
+++ b/mysql-test/main/range_interrupted-13751.test
@@ -1,17 +1,15 @@
--- source include/have_debug.inc
+--source include/have_debug.inc
--source include/default_optimizer_switch.inc
+--source include/have_sequence.inc
#
# MDEV-13751 Interrupted SELECT fails with 1030: 'Got error 1 "Operation not permitted" from storage engine MyISAM'
#
CREATE TABLE t1 (i INT AUTO_INCREMENT, c VARCHAR(1), KEY(i), KEY(c,i)) ENGINE=MyISAM;
-INSERT INTO t1 (c) VALUES ('a'),('b'),('c'),('d');
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
-INSERT INTO t1 (c) SELECT c FROM t1;
+INSERT INTO t1 (c) select mid("abcdefgh", mod(seq,8)+1, 1) from seq_1_to_256;
+
+explain SELECT 1 FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
+WHERE alias1.c = alias2.c OR alias1.i <= 1;
set @old_dbug=@@session.debug_dbug;
SET debug_dbug="+d,kill_join_init_read_record";
diff --git a/mysql-test/main/range_mrr_icp.result b/mysql-test/main/range_mrr_icp.result
index c2c715a0a89..ba81a6c4cba 100644
--- a/mysql-test/main/range_mrr_icp.result
+++ b/mysql-test/main/range_mrr_icp.result
@@ -312,6 +312,9 @@ a b
15 1
47 1
DROP TABLE t1;
+#
+# Test of problem with IN on many different keyparts. (Bug #4157)
+#
CREATE TABLE t1 (
id int( 11 ) unsigned NOT NULL AUTO_INCREMENT ,
line int( 5 ) unsigned NOT NULL default '0',
@@ -328,10 +331,17 @@ KEY `LINES` ( owner, tableid, content, id ) ,
KEY recount( owner, line )
) ENGINE = MYISAM;
INSERT into t1 (owner,id,columnid,line) values (11,15,15,1),(11,13,13,5);
+INSERT into t1 (owner,id,columnid,line) select 11,seq+20,seq,seq from seq_1_to_100;
+explain SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref PRIMARY,menu,COLUMN,LINES,recount COLUMN 4 const 11 Using index condition
SELECT id, columnid, tableid, content, showid, line, ordinal FROM t1 WHERE owner=11 AND ((columnid IN ( 15, 13, 14 ) AND line IN ( 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 31 )) OR (columnid IN ( 13, 14 ) AND line IN ( 15 ))) LIMIT 0 , 30;
id columnid tableid content showid line ordinal
-15 15 1 188 1 1 0
13 13 1 188 1 5 0
+15 15 1 188 1 1 0
+33 13 1 188 1 13 0
+34 14 1 188 1 14 0
+35 15 1 188 1 15 0
drop table t1;
create table t1 (id int(10) primary key);
insert into t1 values (1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -726,7 +736,7 @@ WHERE
v.oxrootid ='d8c4177d09f8b11f5.52725521' AND
s.oxleft > v.oxleft AND s.oxleft < v.oxright;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE v ALL OXLEFT,OXRIGHT,OXROOTID NULL NULL NULL 12 Using where
+1 SIMPLE v ref OXLEFT,OXRIGHT,OXROOTID OXROOTID 34 const 6 Using index condition
1 SIMPLE s ALL OXLEFT NULL NULL NULL 12 Range checked for each record (index map: 0x4)
SELECT s.oxid FROM t1 v, t1 s
WHERE
@@ -1239,14 +1249,16 @@ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, filler char(100));
insert into t2 select A.a + 10 * (B.a + 10 * C.a), 10, 'filler' from t1 A,
t1 B, t1 C where A.a < 5;
-insert into t2 select 1000, b, 'filler' from t2;
+insert into t2 select 1000, b, 'filler' from t2 limit 50;
+select count(*) from t2;
+count(*)
+550
alter table t2 add index (a,b);
-select 'In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)' Z;
-Z
-In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)
+# In the following EXPLAIN the access method should be ref, #rows~=50
+# (and not 2) when we are not using rowid-ordered scans
explain select * from t2 where a=1000 and b<11;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref a a 5 const 503 Using index condition
+1 SIMPLE t2 range a a 10 NULL 63 Using index condition; Rowid-ordered scan
drop table t1, t2;
CREATE TABLE t1( a INT, b INT, KEY( a, b ) );
CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
@@ -2415,6 +2427,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2424,7 +2437,9 @@ EXPLAIN
"key": "idx",
"key_length": "10",
"used_key_parts": ["a", "b"],
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t1.b) in (<cache>((2,3)),<cache>((3,3)),<cache>((8,8)),<cache>((7,7)))",
"mrr_type": "Rowid-ordered scan"
@@ -2480,6 +2495,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2489,7 +2505,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t1.b + t1.a) in (<cache>((4,9)),<cache>((8,8)),<cache>((7,7)))",
"mrr_type": "Rowid-ordered scan"
@@ -2511,6 +2529,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2520,7 +2539,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t1.b) in ((4,t1.a - 1),(8,t1.a + 8),(7,t1.a + 7))",
"mrr_type": "Rowid-ordered scan"
@@ -2556,6 +2577,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2565,7 +2587,9 @@ EXPLAIN
"key": "idx1",
"key_length": "5",
"used_key_parts": ["d"],
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 60,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((2,2)))",
@@ -2581,7 +2605,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 1.8,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2635,6 +2661,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2644,7 +2671,9 @@ EXPLAIN
"key": "idx1",
"key_length": "5",
"used_key_parts": ["d"],
+ "loops": 1,
"rows": 8,
+ "cost": "COST_REPLACED",
"filtered": 14.42307663,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1",
@@ -2660,8 +2689,10 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 1.153846154,
"rows": 8,
- "filtered": 100
+ "cost": "COST_REPLACED",
+ "filtered": 73.17073059
}
}
]
@@ -2670,60 +2701,60 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
a b c d e f
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
+3 2 uuua 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuuw 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-7 7 xxxyy 7 7 h
+3 3 zzza 3 3 i
+3 3 zzzz 3 3 i
+3 3 zzzz 3 3 i
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-3 2 uuuw 3 3 i
-3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
+7 8 xxxxx 7 7 h
prepare stmt from "select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1";
execute stmt;
a b c d e f
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
+3 2 uuua 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuuw 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-7 7 xxxyy 7 7 h
+3 3 zzza 3 3 i
+3 3 zzzz 3 3 i
+3 3 zzzz 3 3 i
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-3 2 uuuw 3 3 i
-3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
+7 8 xxxxx 7 7 h
execute stmt;
a b c d e f
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
+3 2 uuua 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuuw 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxa 3 3 i
+3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
-7 7 xxxyy 7 7 h
+3 3 zzza 3 3 i
+3 3 zzzz 3 3 i
+3 3 zzzz 3 3 i
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-3 2 uuuw 3 3 i
-3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
-3 3 zyxa 3 3 i
+7 8 xxxxx 7 7 h
deallocate prepare stmt;
insert into t1 select * from t1;
# join order: (t2,t1) with ref access of t1
@@ -2739,6 +2770,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2748,7 +2780,9 @@ EXPLAIN
"key": "idx1",
"key_length": "5",
"used_key_parts": ["d"],
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 6.730769157,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1",
@@ -2764,7 +2798,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2774,14 +2810,14 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
a b c d e f
-7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
-7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxyy 7 7 h
+7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-7 8 xxxxx 7 7 h
7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxx 7 7 h
alter table t2 drop index idx1, drop index idx2, add index idx3(d,e);
# join order: (t2,t1) with ref access of t1
# range access to t2 by 2-component keys for index idx3
@@ -2796,6 +2832,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2805,7 +2842,9 @@ EXPLAIN
"key": "idx3",
"key_length": "10",
"used_key_parts": ["d", "e"],
+ "loops": 1,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1",
@@ -2821,7 +2860,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.d"],
+ "loops": 5,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2852,6 +2893,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2861,7 +2903,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 15,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a is not null",
"mrr_type": "Rowid-ordered scan"
@@ -2876,7 +2920,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 15,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t2.e) in ((4,t1.a + 1),(7,t1.a + 1),(8,t1.a + 1)) and octet_length(t2.f) = 1"
}
@@ -2887,22 +2933,22 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((4,d+1),(7,d+1),(8,d+1)) and length(f) = 1;
a b c d e f
-4 5 ww 4 5 a
-7 8 xxxxx 7 8 b
+4 3 zya 4 5 a
+4 3 zya 4 5 a
+4 3 zyx 4 5 a
4 3 zyx 4 5 a
-7 7 xxxyy 7 8 b
4 5 wa 4 5 a
-7 8 xxxxa 7 8 b
-4 3 zya 4 5 a
-7 7 xxxya 7 8 b
+4 5 wa 4 5 a
4 5 ww 4 5 a
-7 8 xxxxx 7 8 b
-4 3 zyx 4 5 a
+4 5 ww 4 5 a
+7 7 xxxya 7 8 b
+7 7 xxxya 7 8 b
+7 7 xxxyy 7 8 b
7 7 xxxyy 7 8 b
-4 5 wa 4 5 a
7 8 xxxxa 7 8 b
-4 3 zya 4 5 a
-7 7 xxxya 7 8 b
+7 8 xxxxa 7 8 b
+7 8 xxxxx 7 8 b
+7 8 xxxxx 7 8 b
# join order: (t1,t2) with ref access of t2
# no range access
explain select * from t1,t2
@@ -2916,13 +2962,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
"possible_keys": ["idx"],
+ "loops": 1,
"rows": 144,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a is not null"
}
@@ -2936,7 +2985,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 144,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "(t1.a,t2.e) in ((t2.e,t1.a + 1),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
}
@@ -2947,14 +2998,14 @@ EXPLAIN
select * from t1,t2
where a = d and (a,e) in ((e,d+1),(7,7),(8,8)) and length(f) = 1;
a b c d e f
-7 8 xxxxx 7 7 h
-7 7 xxxyy 7 7 h
-7 8 xxxxa 7 7 h
7 7 xxxya 7 7 h
-7 8 xxxxx 7 7 h
+7 7 xxxya 7 7 h
+7 7 xxxyy 7 7 h
7 7 xxxyy 7 7 h
7 8 xxxxa 7 7 h
-7 7 xxxya 7 7 h
+7 8 xxxxa 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxx 7 7 h
# join order: (t1,t2) with ref access of t2
# range access to t1 by 1-component keys for index idx
explain select * from t1,t2
@@ -2970,6 +3021,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2979,7 +3031,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,2) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1",
@@ -2995,7 +3049,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 12,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "octet_length(t2.f) = 1"
}
@@ -3054,6 +3110,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -3076,7 +3133,9 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 12,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,1 + 1) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1",
@@ -3092,7 +3151,9 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"ref": ["test.t1.a"],
+ "loops": 12,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "octet_length(t2.f) = 1"
}
@@ -3370,7 +3431,7 @@ insert into t2 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C where A.a +
# expected type=range, rows=1487 , reason=using index dives
analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198);
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE t1 range a a 5 NULL 1487 1199.00 100.00 100.00 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 2000 2000.00 74.35 59.95 Using where; Using index
insert into t2 values (200),(201);
# expected type=range, rows=201 , reason=using index statistics
analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,200,201);
diff --git a/mysql-test/main/range_notembedded.result b/mysql-test/main/range_notembedded.result
index e1bcc7463d5..7834418bd2b 100644
--- a/mysql-test/main/range_notembedded.result
+++ b/mysql-test/main/range_notembedded.result
@@ -225,9 +225,10 @@ user_id int(10) unsigned NOT NULL DEFAULT 0,
PRIMARY KEY (notification_type_id,item_id,item_parent_id,user_id)
);
insert into t1 values (1,1,1,1), (2,2,2,2), (3,3,3,3);
+insert into t1 select seq,seq,seq,seq from seq_10_to_30;
# Run crashing query
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 2 NULL 3 Using where
+1 SIMPLE t1 range PRIMARY PRIMARY 2 NULL 5 Using where
drop table t1;
#
# MDEV-25069: Assertion `root->weight >= ...' failed in SEL_ARG::tree_delete #2
diff --git a/mysql-test/main/range_notembedded.test b/mysql-test/main/range_notembedded.test
index 00d16a5d564..5778cdbd82c 100644
--- a/mysql-test/main/range_notembedded.test
+++ b/mysql-test/main/range_notembedded.test
@@ -122,6 +122,7 @@ CREATE TABLE t1 (
PRIMARY KEY (notification_type_id,item_id,item_parent_id,user_id)
);
insert into t1 values (1,1,1,1), (2,2,2,2), (3,3,3,3);
+insert into t1 select seq,seq,seq,seq from seq_10_to_30;
let $consts=`select group_concat(concat("'",seq,"'")) from seq_1_to_4642`;
diff --git a/mysql-test/main/range_vs_index_merge.result b/mysql-test/main/range_vs_index_merge.result
index 1729b95a105..03ae3d69507 100644
--- a/mysql-test/main/range_vs_index_merge.result
+++ b/mysql-test/main/range_vs_index_merge.result
@@ -949,7 +949,7 @@ WHERE ((Population > 101000 AND Population < 11000) OR
ID BETWEEN 3500 AND 3800) AND Country='USA'
AND (Name LIKE 'P%' OR ID BETWEEN 4000 AND 4300);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range|filter PRIMARY,Population,Country,Name,CountryPopulation,CountryName CountryName|PRIMARY 38|4 NULL 23 (7%) Using index condition; Using where; Using rowid filter
+1 SIMPLE City range PRIMARY,Population,Country,Name,CountryPopulation,CountryName CountryName 38 NULL 23 Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE ((Population > 101000 AND Population < 11000) OR
@@ -1077,7 +1077,7 @@ EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Country,CountryPopulation,CountryName,CityName CountryName,CityName 38,35 NULL 28 Using sort_union(CountryName,CityName); Using where
+1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CountryName 38 NULL 28 Using index condition
SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
(Name='Addis Abeba' AND Country='ETH') OR
@@ -1325,11 +1325,11 @@ WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
AND (Population >= 100000 AND Population < 120000)
ORDER BY Population LIMIT 5;
ID Name Country Population
+3792 Tartu EST 101246
+518 Basildon GBR 100924
519 Worthing GBR 100000
638 al-Arish EGY 100447
-518 Basildon GBR 100924
707 Marbella ESP 101144
-3792 Tartu EST 101246
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
@@ -1750,6 +1750,9 @@ SELECT * FROM t1,t2,t3
WHERE (t2.f3 = 1 OR t3.f1=t2.f1) AND t3.f1 <> t2.f2 AND t3.f2 = t2.f4;
f1 f1 f2 f3 f4 f1 f2
DROP TABLE t1,t2,t3;
+#
+# LP bug #823301: index merge sort union with possible index scan
+#
CREATE TABLE t1 (
a int, b int, c int, d int,
PRIMARY KEY(b), INDEX idx1(d), INDEX idx2(d,b,c)
@@ -1766,7 +1769,7 @@ EXPLAIN
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY,idx1,idx2 NULL NULL NULL 9 Using where
+1 SIMPLE t1 range PRIMARY,idx1,idx2 idx1 5 NULL 5 Using index condition; Using where
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
a b c d
@@ -1780,7 +1783,7 @@ EXPLAIN
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY,idx1,idx2 NULL NULL NULL 9 Using where
+1 SIMPLE t1 range PRIMARY,idx1,idx2 idx1 5 NULL 5 Using index condition; Using where
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
a b c d
@@ -1894,7 +1897,7 @@ INDEX (Percentage)
DROP INDEX Country ON City;
CREATE INDEX CountryName ON City(Country,Name);
CREATE INDEX Name ON City(Name);
-select * from City
+explain select * from City
where
Country='FIN' AND Name IN ('Lahti','Imatra') OR
Country='RUS' AND Name IN ('St Petersburg', 'Moscow') OR
@@ -1906,26 +1909,9 @@ Country='FRA' AND Name IN ('Paris', 'Marcel') OR
Country='POL' AND Name IN ('Warszawa', 'Wroclaw') OR
Country='NOR' AND Name IN ('Oslo', 'Bergen') OR
Country='ITA' AND Name IN ('Napoli', 'Venezia');
-ID Name Country Population
-175 Antwerpen BEL 446525
-176 Gent BEL 224180
-3068 Berlin DEU 3386667
-3087 Bonn DEU 301048
-3242 Lahti FIN 96921
-2974 Paris FRA 2125246
-1466 Napoli ITA 1002619
-1474 Venezia ITA 277305
-2808 Bergen NOR 230948
-2807 Oslo NOR 508726
-2928 Warszawa POL 1615369
-2931 Wroclaw POL 636765
-2918 Braga PRT 90535
-2915 Porto PRT 273060
-3580 Moscow RUS 8389200
-3581 St Petersburg RUS 4694000
-3048 Stockholm SWE 750348
-3051 Uppsala SWE 189569
-explain select * from City
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE City range CountryName,Name CountryName 38 NULL 20 Using index condition
+select * from City
where
Country='FIN' AND Name IN ('Lahti','Imatra') OR
Country='RUS' AND Name IN ('St Petersburg', 'Moscow') OR
@@ -1937,7 +1923,24 @@ Country='FRA' AND Name IN ('Paris', 'Marcel') OR
Country='POL' AND Name IN ('Warszawa', 'Wroclaw') OR
Country='NOR' AND Name IN ('Oslo', 'Bergen') OR
Country='ITA' AND Name IN ('Napoli', 'Venezia');
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range CountryName,Name CountryName 38 NULL 20 Using index condition
+ID Name Country Population
+1466 Napoli ITA 1002619
+1474 Venezia ITA 277305
+175 Antwerpen BEL 446525
+176 Gent BEL 224180
+2807 Oslo NOR 508726
+2808 Bergen NOR 230948
+2915 Porto PRT 273060
+2918 Braga PRT 90535
+2928 Warszawa POL 1615369
+2931 Wroclaw POL 636765
+2974 Paris FRA 2125246
+3048 Stockholm SWE 750348
+3051 Uppsala SWE 189569
+3068 Berlin DEU 3386667
+3087 Bonn DEU 301048
+3242 Lahti FIN 96921
+3580 Moscow RUS 8389200
+3581 St Petersburg RUS 4694000
DROP DATABASE world;
set session optimizer_switch='index_merge_sort_intersection=default';
diff --git a/mysql-test/main/range_vs_index_merge.test b/mysql-test/main/range_vs_index_merge.test
index 38b643c0b13..14d75b88bfa 100644
--- a/mysql-test/main/range_vs_index_merge.test
+++ b/mysql-test/main/range_vs_index_merge.test
@@ -659,7 +659,7 @@ let $cond =
(Name='Lugansk' AND Country='UKR') OR
(Name='Caracas' AND Country='VEN') OR
(Name='Samara' AND Country='RUS') OR
-(Name='Seattle' AND Country='USA');
+(Name='Seattle' AND Country='USA');
eval
EXPLAIN SELECT Name, Country, Population FROM City WHERE
@@ -714,6 +714,7 @@ SELECT * FROM City
ORDER BY Population LIMIT 5;
FLUSH STATUS;
+--sorted_result
SELECT * FROM City
WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
AND (Population >= 100000 AND Population < 120000)
@@ -1174,9 +1175,9 @@ SELECT * FROM t1,t2,t3
DROP TABLE t1,t2,t3;
-#
-# LP bug #823301: index merge sort union with possible index scan
-#
+--echo #
+--echo # LP bug #823301: index merge sort union with possible index scan
+--echo #
CREATE TABLE t1 (
a int, b int, c int, d int,
@@ -1192,12 +1193,14 @@ SET SESSION optimizer_switch='index_merge_sort_union=off';
EXPLAIN
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
+--sorted_result
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
SET SESSION optimizer_switch='index_merge_sort_union=on';
EXPLAIN
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
+--sorted_result
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
SET SESSION optimizer_switch=DEFAULT;
@@ -1320,12 +1323,11 @@ where
Country='NOR' AND Name IN ('Oslo', 'Bergen') OR
Country='ITA' AND Name IN ('Napoli', 'Venezia');
-eval $q;
eval explain $q;
-
+--sorted_result
+eval $q;
DROP DATABASE world;
#the following command must be the last one in the file
set session optimizer_switch='index_merge_sort_intersection=default';
-
diff --git a/mysql-test/main/range_vs_index_merge_innodb.result b/mysql-test/main/range_vs_index_merge_innodb.result
index 79a670aedb2..8e0bf58fbc3 100644
--- a/mysql-test/main/range_vs_index_merge_innodb.result
+++ b/mysql-test/main/range_vs_index_merge_innodb.result
@@ -1331,11 +1331,11 @@ WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
AND (Population >= 100000 AND Population < 120000)
ORDER BY Population LIMIT 5;
ID Name Country Population
+3792 Tartu EST 101246
+518 Basildon GBR 100924
519 Worthing GBR 100000
638 al-Arish EGY 100447
-518 Basildon GBR 100924
707 Marbella ESP 101144
-3792 Tartu EST 101246
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
@@ -1756,6 +1756,9 @@ SELECT * FROM t1,t2,t3
WHERE (t2.f3 = 1 OR t3.f1=t2.f1) AND t3.f1 <> t2.f2 AND t3.f2 = t2.f4;
f1 f1 f2 f3 f4 f1 f2
DROP TABLE t1,t2,t3;
+#
+# LP bug #823301: index merge sort union with possible index scan
+#
CREATE TABLE t1 (
a int, b int, c int, d int,
PRIMARY KEY(b), INDEX idx1(d), INDEX idx2(d,b,c)
@@ -1772,7 +1775,7 @@ EXPLAIN
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY,idx1,idx2 NULL NULL NULL 9 Using where
+1 SIMPLE t1 range PRIMARY,idx1,idx2 idx1 5 NULL 5 Using index condition
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
a b c d
@@ -1786,7 +1789,7 @@ EXPLAIN
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY,idx1,idx2 NULL NULL NULL 9 Using where
+1 SIMPLE t1 range PRIMARY,idx1,idx2 idx1 5 NULL 5 Using index condition
SELECT * FROM t1
WHERE t1.b>7 AND t1.d>1 AND t1.d<>8 OR t1.d>=7 AND t1.d<8 OR t1.d>7;
a b c d
@@ -1804,7 +1807,7 @@ SELECT * FROM t1
WHERE t1.a>300 AND t1.c!=0 AND t1.b>=350 AND t1.b<=400 AND
(t1.c=0 OR t1.a=500);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,idx idx 5 NULL 2 Using where; Using index
+1 SIMPLE t1 range PRIMARY,idx PRIMARY 4 NULL 1 Using where
SELECT * FROM t1
WHERE t1.a>300 AND t1.c!=0 AND t1.b>=350 AND t1.b<=400 AND
(t1.c=0 OR t1.a=500);
@@ -1900,7 +1903,7 @@ INDEX (Percentage)
DROP INDEX Country ON City;
CREATE INDEX CountryName ON City(Country,Name);
CREATE INDEX Name ON City(Name);
-select * from City
+explain select * from City
where
Country='FIN' AND Name IN ('Lahti','Imatra') OR
Country='RUS' AND Name IN ('St Petersburg', 'Moscow') OR
@@ -1912,26 +1915,9 @@ Country='FRA' AND Name IN ('Paris', 'Marcel') OR
Country='POL' AND Name IN ('Warszawa', 'Wroclaw') OR
Country='NOR' AND Name IN ('Oslo', 'Bergen') OR
Country='ITA' AND Name IN ('Napoli', 'Venezia');
-ID Name Country Population
-175 Antwerpen BEL 446525
-2808 Bergen NOR 230948
-3068 Berlin DEU 3386667
-3087 Bonn DEU 301048
-2918 Braga PRT 90535
-176 Gent BEL 224180
-3242 Lahti FIN 96921
-3580 Moscow RUS 8389200
-1466 Napoli ITA 1002619
-2807 Oslo NOR 508726
-2974 Paris FRA 2125246
-2915 Porto PRT 273060
-3581 St Petersburg RUS 4694000
-3048 Stockholm SWE 750348
-3051 Uppsala SWE 189569
-1474 Venezia ITA 277305
-2928 Warszawa POL 1615369
-2931 Wroclaw POL 636765
-explain select * from City
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE City range CountryName,Name Name 35 NULL 20 Using index condition; Using where
+select * from City
where
Country='FIN' AND Name IN ('Lahti','Imatra') OR
Country='RUS' AND Name IN ('St Petersburg', 'Moscow') OR
@@ -1943,8 +1929,25 @@ Country='FRA' AND Name IN ('Paris', 'Marcel') OR
Country='POL' AND Name IN ('Warszawa', 'Wroclaw') OR
Country='NOR' AND Name IN ('Oslo', 'Bergen') OR
Country='ITA' AND Name IN ('Napoli', 'Venezia');
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range CountryName,Name Name 35 NULL 20 Using index condition; Using where
+ID Name Country Population
+1466 Napoli ITA 1002619
+1474 Venezia ITA 277305
+175 Antwerpen BEL 446525
+176 Gent BEL 224180
+2807 Oslo NOR 508726
+2808 Bergen NOR 230948
+2915 Porto PRT 273060
+2918 Braga PRT 90535
+2928 Warszawa POL 1615369
+2931 Wroclaw POL 636765
+2974 Paris FRA 2125246
+3048 Stockholm SWE 750348
+3051 Uppsala SWE 189569
+3068 Berlin DEU 3386667
+3087 Bonn DEU 301048
+3242 Lahti FIN 96921
+3580 Moscow RUS 8389200
+3581 St Petersburg RUS 4694000
DROP DATABASE world;
set session optimizer_switch='index_merge_sort_intersection=default';
set global innodb_stats_persistent= @innodb_stats_persistent_save;
diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result
index bb55a9e328d..b544c784c25 100644
--- a/mysql-test/main/rowid_filter.result
+++ b/mysql-test/main/rowid_filter.result
@@ -57,47 +57,50 @@ set optimizer_use_condition_selectivity=2;
select
100 *
(select count(*) from lineitem
-WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 45
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 47
)
/
(select count(*) from lineitem
where l_shipdate BETWEEN '1997-01-01' AND '1997-06-30')
as correct_r_filtered_when_using_l_shipdate;
correct_r_filtered_when_using_l_shipdate
-11.7647
+6.6667
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 426 (8%) Using index condition; Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 702,
- "selectivity_pct": 11.69025812
+ "rows": 509,
+ "selectivity_pct": 8.476269775
},
- "rows": 509,
- "filtered": 11.69025803,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "loops": 1,
+ "rows": 426,
+ "cost": "COST_REPLACED",
+ "filtered": 8.476269722,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -105,12 +108,12 @@ EXPLAIN
}
set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (11%) 11.69 100.00 Using index condition; Using where; Using rowid filter
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 426 (8%) 34.00 (9%) 8.48 100.00 Using index condition; Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
ANALYZE
{
"query_optimization": {
@@ -118,6 +121,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -126,31 +130,33 @@ ANALYZE
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 702,
- "selectivity_pct": 11.69025812,
- "r_rows": 605,
- "r_lookups": 510,
- "r_selectivity_pct": 11.76470588,
+ "rows": 509,
+ "selectivity_pct": 8.476269775,
+ "r_rows": 510,
+ "r_lookups": 349,
+ "r_selectivity_pct": 9.742120344,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
+ "loops": 1,
"r_loops": 1,
- "rows": 509,
- "r_rows": 60,
+ "rows": 426,
+ "r_rows": 34,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 11.69025803,
+ "filtered": 8.476269722,
"r_filtered": 100,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -158,21 +164,13 @@ ANALYZE
}
set statement optimizer_switch='rowid_filter=on' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
l_orderkey l_linenumber l_shipdate l_quantity
-1121 5 1997-04-27 47
1121 6 1997-04-21 50
1441 7 1997-06-07 50
-1443 1 1997-02-05 47
1473 1 1997-05-05 50
-1568 2 1997-04-06 46
-1632 1 1997-01-25 47
-1632 3 1997-01-29 47
1954 7 1997-06-04 49
-1959 1 1997-05-05 46
2151 3 1997-01-20 49
-2177 5 1997-05-10 46
-2369 2 1997-01-02 47
2469 3 1997-01-11 48
2469 6 1997-03-03 49
2470 2 1997-06-02 50
@@ -185,66 +183,51 @@ l_orderkey l_linenumber l_shipdate l_quantity
3429 1 1997-04-08 48
3490 2 1997-06-27 50
3619 1 1997-01-22 49
-3619 3 1997-01-31 46
-3969 3 1997-05-29 46
4005 4 1997-01-31 49
-4036 1 1997-06-21 46
4066 4 1997-02-17 49
-4098 1 1997-01-26 46
-422 3 1997-06-21 46
-4258 3 1997-01-02 46
-4421 2 1997-04-21 46
-4421 3 1997-05-25 46
4453 3 1997-05-29 48
4484 7 1997-03-17 50
-4609 3 1997-02-11 46
484 1 1997-03-06 49
484 3 1997-01-24 50
484 5 1997-03-05 48
485 1 1997-03-28 50
-4868 1 1997-04-29 47
4868 3 1997-04-23 49
4934 1 1997-05-20 48
4967 1 1997-05-27 50
-5090 2 1997-04-05 46
5152 2 1997-03-10 50
5158 4 1997-04-10 49
-5606 3 1997-03-11 46
-5606 7 1997-02-01 46
-5762 4 1997-03-02 47
581 3 1997-02-27 49
5829 5 1997-01-31 49
-5831 4 1997-02-24 46
-5895 2 1997-04-27 47
5895 3 1997-03-15 49
5952 1 1997-06-30 49
-705 1 1997-04-18 46
-836 3 1997-03-21 46
set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 509 Using index condition; Using where
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_quantity 9 NULL 426 Using index condition; Using where
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
- "rows": 509,
- "filtered": 11.69025803,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
+ "loops": 1,
+ "rows": 426,
+ "cost": "COST_REPLACED",
+ "filtered": 8.476269722,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -252,12 +235,12 @@ EXPLAIN
}
set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.69 11.76 Using index condition; Using where
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_quantity 9 NULL 426 349.00 8.48 9.74 Using index condition; Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
ANALYZE
{
"query_optimization": {
@@ -265,6 +248,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -273,18 +257,20 @@ ANALYZE
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
+ "loops": 1,
"r_loops": 1,
- "rows": 509,
- "r_rows": 510,
+ "rows": 426,
+ "r_rows": 349,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 11.69025803,
- "r_filtered": 11.76470588,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "filtered": 8.476269722,
+ "r_filtered": 9.742120344,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -292,21 +278,13 @@ ANALYZE
}
set statement optimizer_switch='rowid_filter=off' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
l_orderkey l_linenumber l_shipdate l_quantity
-1121 5 1997-04-27 47
1121 6 1997-04-21 50
1441 7 1997-06-07 50
-1443 1 1997-02-05 47
1473 1 1997-05-05 50
-1568 2 1997-04-06 46
-1632 1 1997-01-25 47
-1632 3 1997-01-29 47
1954 7 1997-06-04 49
-1959 1 1997-05-05 46
2151 3 1997-01-20 49
-2177 5 1997-05-10 46
-2369 2 1997-01-02 47
2469 3 1997-01-11 48
2469 6 1997-03-03 49
2470 2 1997-06-02 50
@@ -319,48 +297,30 @@ l_orderkey l_linenumber l_shipdate l_quantity
3429 1 1997-04-08 48
3490 2 1997-06-27 50
3619 1 1997-01-22 49
-3619 3 1997-01-31 46
-3969 3 1997-05-29 46
4005 4 1997-01-31 49
-4036 1 1997-06-21 46
4066 4 1997-02-17 49
-4098 1 1997-01-26 46
-422 3 1997-06-21 46
-4258 3 1997-01-02 46
-4421 2 1997-04-21 46
-4421 3 1997-05-25 46
4453 3 1997-05-29 48
4484 7 1997-03-17 50
-4609 3 1997-02-11 46
484 1 1997-03-06 49
484 3 1997-01-24 50
484 5 1997-03-05 48
485 1 1997-03-28 50
-4868 1 1997-04-29 47
4868 3 1997-04-23 49
4934 1 1997-05-20 48
4967 1 1997-05-27 50
-5090 2 1997-04-05 46
5152 2 1997-03-10 50
5158 4 1997-04-10 49
-5606 3 1997-03-11 46
-5606 7 1997-02-01 46
-5762 4 1997-03-02 47
581 3 1997-02-27 49
5829 5 1997-01-31 49
-5831 4 1997-02-24 46
-5895 2 1997-04-27 47
5895 3 1997-03-15 49
5952 1 1997-06-30 49
-705 1 1997-04-18 46
-836 3 1997-03-21 46
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition
-1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) Using where; Using rowid filter
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition
+1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (2%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
@@ -369,45 +329,50 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
- "table_name": "lineitem",
+ "table_name": "orders",
"access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "loops": 1,
+ "rows": 69,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
"possible_keys": [
"PRIMARY",
"i_l_shipdate",
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
- "rows": 98,
- "filtered": 100,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
- }
- },
- {
- "table": {
- "table_name": "orders",
- "access_type": "eq_ref",
- "possible_keys": ["PRIMARY", "i_o_totalprice"],
- "key": "PRIMARY",
+ "key": "i_l_orderkey",
"key_length": "4",
- "used_key_parts": ["o_orderkey"],
- "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
"rowid_filter": {
"range": {
- "key": "i_o_totalprice",
- "used_key_parts": ["o_totalprice"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 69,
- "selectivity_pct": 4.6
+ "rows": 98,
+ "selectivity_pct": 1.631973356
},
- "rows": 1,
- "filtered": 4.599999905,
- "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ "loops": 69,
+ "rows": 4,
+ "cost": "COST_REPLACED",
+ "filtered": 1.631973386,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
}
}
]
@@ -418,8 +383,8 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
-1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 4.60 100.00 Using where; Using rowid filter
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition
+1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (2%) 0.15 (2%) 1.63 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
@@ -431,62 +396,67 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
{
"table": {
- "table_name": "lineitem",
+ "table_name": "orders",
"access_type": "range",
- "possible_keys": [
- "PRIMARY",
- "i_l_shipdate",
- "i_l_orderkey",
- "i_l_orderkey_quantity"
- ],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "loops": 1,
"r_loops": 1,
- "rows": 98,
- "r_rows": 98,
+ "rows": 69,
+ "r_rows": 71,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 100,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
}
},
{
"table": {
- "table_name": "orders",
- "access_type": "eq_ref",
- "possible_keys": ["PRIMARY", "i_o_totalprice"],
- "key": "PRIMARY",
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_orderkey",
"key_length": "4",
- "used_key_parts": ["o_orderkey"],
- "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
"rowid_filter": {
"range": {
- "key": "i_o_totalprice",
- "used_key_parts": ["o_totalprice"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 69,
- "selectivity_pct": 4.6,
- "r_rows": 71,
- "r_lookups": 96,
- "r_selectivity_pct": 10.41666667,
+ "rows": 98,
+ "selectivity_pct": 1.631973356,
+ "r_rows": 98,
+ "r_lookups": 476,
+ "r_selectivity_pct": 2.31092437,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
- "r_loops": 98,
- "rows": 1,
- "r_rows": 0.112244898,
+ "loops": 69,
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 0.154929577,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 4.599999905,
+ "filtered": 1.631973386,
"r_filtered": 100,
- "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
}
}
]
@@ -523,6 +493,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -537,7 +508,9 @@ EXPLAIN
"key": "i_l_shipdate",
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
+ "loops": 1,
"rows": 98,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
}
@@ -551,7 +524,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 98,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 4.599999905,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
@@ -577,6 +552,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -593,9 +569,11 @@ ANALYZE
"key": "i_l_shipdate",
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 98,
"r_rows": 98,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -612,9 +590,12 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 98,
"r_loops": 98,
+ "r_table_loops": 96,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 4.599999905,
@@ -644,20 +625,21 @@ o_orderkey l_linenumber l_shipdate o_totalprice
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 426 (8%) Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -670,21 +652,23 @@ EXPLAIN
"i_l_orderkey_quantity",
"i_l_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 702,
- "selectivity_pct": 11.69025812
+ "rows": 509,
+ "selectivity_pct": 8.476269775
},
- "rows": 509,
- "filtered": 11.69025803,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "loops": 1,
+ "rows": 426,
+ "cost": "COST_REPLACED",
+ "filtered": 8.476269722,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
},
{
@@ -704,7 +688,9 @@ EXPLAIN
"rows": 139,
"selectivity_pct": 9.266666667
},
+ "loops": 36.10890924,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 9.266666412,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
@@ -715,15 +701,15 @@ EXPLAIN
set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (11%) 11.69 100.00 Using index condition; Using where; Using rowid filter
-1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) 0.27 (25%) 9.27 100.00 Using where; Using rowid filter
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 426 (8%) 34.00 (9%) 8.48 100.00 Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) 0.26 (26%) 9.27 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
ANALYZE
{
@@ -732,6 +718,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -746,31 +733,33 @@ ANALYZE
"i_l_orderkey_quantity",
"i_l_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 702,
- "selectivity_pct": 11.69025812,
- "r_rows": 605,
- "r_lookups": 510,
- "r_selectivity_pct": 11.76470588,
+ "rows": 509,
+ "selectivity_pct": 8.476269775,
+ "r_rows": 510,
+ "r_lookups": 349,
+ "r_selectivity_pct": 9.742120344,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
+ "loops": 1,
"r_loops": 1,
- "rows": 509,
- "r_rows": 60,
+ "rows": 426,
+ "r_rows": 34,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 11.69025803,
+ "filtered": 8.476269722,
"r_filtered": 100,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
},
{
@@ -790,14 +779,16 @@ ANALYZE
"rows": 139,
"selectivity_pct": 9.266666667,
"r_rows": 144,
- "r_lookups": 59,
- "r_selectivity_pct": 25.42372881,
+ "r_lookups": 34,
+ "r_selectivity_pct": 26.47058824,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
- "r_loops": 60,
+ "loops": 36.10890924,
+ "r_loops": 34,
"rows": 1,
- "r_rows": 0.266666667,
+ "r_rows": 0.264705882,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 9.266666412,
@@ -811,42 +802,36 @@ ANALYZE
set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
-1632 1 1997-01-25 47 183286.33
-1632 3 1997-01-29 47 183286.33
-2177 5 1997-05-10 46 183493.42
2469 3 1997-01-11 48 192074.23
2469 6 1997-03-03 49 192074.23
3619 1 1997-01-22 49 222274.54
-3619 3 1997-01-31 46 222274.54
484 1 1997-03-06 49 219920.62
484 3 1997-01-24 50 219920.62
484 5 1997-03-05 48 219920.62
4934 1 1997-05-20 48 180478.16
-5606 3 1997-03-11 46 219959.08
-5606 7 1997-02-01 46 219959.08
5829 5 1997-01-31 49 183734.56
-5895 2 1997-04-27 47 201419.83
5895 3 1997-03-15 49 201419.83
set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 509 Using index condition; Using where
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity 9 NULL 426 Using index condition; Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -859,13 +844,15 @@ EXPLAIN
"i_l_orderkey_quantity",
"i_l_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
- "rows": 509,
- "filtered": 11.69025803,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
+ "loops": 1,
+ "rows": 426,
+ "cost": "COST_REPLACED",
+ "filtered": 8.476269722,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
},
{
@@ -877,7 +864,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 36.10890924,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 9.266666412,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
@@ -888,15 +877,15 @@ EXPLAIN
set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.69 11.76 Using index condition; Using where
-1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.27 26.67 Using where
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity 9 NULL 426 349.00 8.48 9.74 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.27 26.47 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
ANALYZE
{
@@ -905,6 +894,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -919,18 +909,20 @@ ANALYZE
"i_l_orderkey_quantity",
"i_l_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
+ "loops": 1,
"r_loops": 1,
- "rows": 509,
- "r_rows": 510,
+ "rows": 426,
+ "r_rows": 349,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 11.69025803,
- "r_filtered": 11.76470588,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "filtered": 8.476269722,
+ "r_filtered": 9.742120344,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
},
{
@@ -942,13 +934,15 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
- "r_loops": 60,
+ "loops": 36.10890924,
+ "r_loops": 34,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 9.266666412,
- "r_filtered": 26.66666667,
+ "r_filtered": 26.47058824,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
}
@@ -958,25 +952,26 @@ ANALYZE
set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
-1632 1 1997-01-25 47 183286.33
-1632 3 1997-01-29 47 183286.33
-2177 5 1997-05-10 46 183493.42
2469 3 1997-01-11 48 192074.23
2469 6 1997-03-03 49 192074.23
3619 1 1997-01-22 49 222274.54
-3619 3 1997-01-31 46 222274.54
484 1 1997-03-06 49 219920.62
484 3 1997-01-24 50 219920.62
484 5 1997-03-05 48 219920.62
4934 1 1997-05-20 48 180478.16
-5606 3 1997-03-11 46 219959.08
-5606 7 1997-02-01 46 219959.08
5829 5 1997-01-31 49 183734.56
-5895 2 1997-04-27 47 201419.83
5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT STRAIGHT_JOIN o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM lineitem JOIN orders ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 426 (8%) Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
@@ -992,6 +987,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1001,7 +997,9 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"rows": 69,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "orders.o_totalprice between 200000 and 230000"
}
@@ -1028,7 +1026,9 @@ EXPLAIN
"rows": 509,
"selectivity_pct": 8.476269775
},
+ "loops": 69,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 8.476269722,
"attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
@@ -1054,6 +1054,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1065,9 +1066,11 @@ ANALYZE
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"r_loops": 1,
"rows": 69,
"r_rows": 71,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1102,9 +1105,11 @@ ANALYZE
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
+ "loops": 69,
"r_loops": 71,
"rows": 4,
"r_rows": 0.521126761,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 8.476269722,
@@ -1172,6 +1177,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1181,7 +1187,9 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"rows": 69,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "orders.o_totalprice between 200000 and 230000"
}
@@ -1200,7 +1208,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 69,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 8.476269722,
"attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
@@ -1226,6 +1236,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1237,9 +1248,11 @@ ANALYZE
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"r_loops": 1,
"rows": 69,
"r_rows": 71,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1261,9 +1274,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 69,
"r_loops": 71,
"rows": 4,
"r_rows": 6.704225352,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 8.476269722,
@@ -1342,6 +1357,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1357,7 +1373,9 @@ EXPLAIN
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 0.566194832,
"index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
@@ -1372,7 +1390,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 7.466666698,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
@@ -1402,6 +1422,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1419,9 +1440,11 @@ ANALYZE
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 18,
"r_rows": 18,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 0.566194832,
@@ -1439,9 +1462,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"r_loops": 7,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 7.466666698,
@@ -1479,6 +1504,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1494,7 +1520,9 @@ EXPLAIN
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 0.566194832,
"index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
@@ -1509,7 +1537,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 7.466666698,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
@@ -1539,6 +1569,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1556,9 +1587,11 @@ ANALYZE
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 18,
"r_rows": 18,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 0.566194832,
@@ -1576,9 +1609,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"r_loops": 7,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 7.466666698,
@@ -1612,7 +1647,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM orders, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -1623,6 +1658,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1632,7 +1668,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 39,
+ "cost": "COST_REPLACED",
"filtered": 3.200000048,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
@@ -1648,11 +1686,13 @@ EXPLAIN
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.248,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 3.047460556,
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -1668,7 +1708,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.20 2.44 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM orders, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -1682,6 +1722,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1693,9 +1734,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 39,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.200000048,
@@ -1714,13 +1757,15 @@ ANALYZE
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.248,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.047460556,
@@ -1750,7 +1795,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM orders, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -1761,6 +1806,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1770,7 +1816,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 39,
+ "cost": "COST_REPLACED",
"filtered": 3.200000048,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
@@ -1786,11 +1834,13 @@ EXPLAIN
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.248,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 3.047460556,
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -1806,7 +1856,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.20 2.44 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM orders, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -1820,6 +1870,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1831,9 +1882,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 39,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.200000048,
@@ -1852,13 +1905,15 @@ ANALYZE
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.248,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.047460556,
@@ -1891,7 +1946,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM v1, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -1902,6 +1957,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
@@ -1916,7 +1972,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 39,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
@@ -1932,11 +1990,13 @@ EXPLAIN
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"rows": 4,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -1952,7 +2012,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 # 2.44 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 6.00 # 66.67 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 # 66.67 Using where
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM v1, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -1966,6 +2026,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1982,9 +2043,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 39,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -2003,13 +2066,15 @@ ANALYZE
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -2039,7 +2104,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM v1, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -2050,6 +2115,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
@@ -2064,7 +2130,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 39,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
@@ -2080,11 +2148,13 @@ EXPLAIN
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"rows": 4,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -2100,7 +2170,7 @@ o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 # 2.44 Using index condition; Using where
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 6.00 # 66.67 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 # 66.67 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM v1, lineitem
WHERE o_orderkey=l_orderkey AND
@@ -2114,6 +2184,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -2130,9 +2201,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 39,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -2151,13 +2224,15 @@ ANALYZE
"i_l_orderkey",
"i_l_orderkey_quantity"
],
- "key": "i_l_orderkey",
+ "key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -2185,700 +2260,3 @@ ALTER TABLE orders DROP COLUMN o_totaldiscount;
DROP VIEW v1;
DROP DATABASE dbt3_s001;
use test;
-#
-# MDEV-18816: potential range filter for one join table with
-# impossible WHERE for another
-#
-create table t1 (
-pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
-) engine=myisam;
-insert into t1 values (1,'a',-5),(2,'a',null);
-create table t2 (
-pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
-) engine=myisam;
-insert into t2 values
-(1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
-(7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
-(13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
-select 1
-from t1
-left join
-t2 join t1 as t1_a on t2.i1 = t1_a.pk
-on t1.c2 = t2.c1
-where t1_a.pk is null and t1_a.i1 != 3;
-1
-explain extended select 1
-from t1
-left join
-t2 join t1 as t1_a on t2.i1 = t1_a.pk
-on t1.c2 = t2.c1
-where t1_a.pk is null and t1_a.i1 != 3;
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
-Warnings:
-Note 1003 select 1 AS `1` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t1_a` where 0
-drop table t1,t2;
-#
-# MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
-# move depends on uninitialized value
-#
-CREATE TABLE t1 (
-pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
-SELECT * FROM t1 WHERE pk < 5;
-pk i
-1 10
-2 20
-DROP TABLE t1;
-#
-# MDEV-18956: Possible rowid filter for subquery for which
-# in_to_exists strategy has been chosen
-#
-CREATE TABLE t1 (pk int) engine=myisam ;
-INSERT INTO t1 VALUES (1),(2);
-CREATE TABLE t2 (
-pk int auto_increment PRIMARY KEY,
-i1 int, i2 int, c2 varchar(1),
-KEY (i1), KEY (i2)
-) engine=myisam;
-INSERT INTO t2 VALUES
-(1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
-(6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
-(11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
-SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
-pk
-EXPLAIN EXTENDED
-SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING
-2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where
-Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0
-DROP TABLE t1,t2;
-#
-# MDEV-19255: rowid range filter built for range condition
-# that uses in expensive subquery
-#
-CREATE TABLE t1 (
-pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES
-(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
-(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
-(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
-(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
-(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
-(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
-(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
-(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
-(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
-(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
-(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
-(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
-(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
-(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
-(107,8,'z'),(108,3,'k'),(109,65,NULL);
-CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
-INSERT INTO t2 VALUES (1,1,'i');
-INSERT INTO t2 SELECT * FROM t1;
-INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1;
-INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1;
-ANALYZE TABLE t1,t2 PERSISTENT FOR ALL;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
-test.t2 analyze status Engine-independent statistics collected
-test.t2 analyze status OK
-SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
-WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-pk1 a1 b1 pk2 a2 b2
-17 1 f 16 1 j
-37 3 g 36 3 a
-105 8 i 104 8 e
-EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
-WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where
-1 PRIMARY t1 ref a1,b1 a1 5 test.t2.a2 36 28.75 Using where
-2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition
-Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2
-EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
-WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-EXPLAIN
-{
- "query_block": {
- "select_id": 1,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "rows": 101,
- "filtered": 100,
- "attached_condition": "t2.a2 is not null"
- }
- },
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["a1", "b1"],
- "key": "a1",
- "key_length": "5",
- "used_key_parts": ["a1"],
- "ref": ["test.t2.a2"],
- "rows": 36,
- "filtered": 28.75,
- "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2"
- }
- }
- ],
- "subqueries": [
- {
- "query_block": {
- "select_id": 2,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "range",
- "possible_keys": ["PRIMARY"],
- "key": "PRIMARY",
- "key_length": "4",
- "used_key_parts": ["pk2"],
- "rows": 1,
- "filtered": 100,
- "index_condition": "t2.pk2 <= 1"
- }
- }
- ]
- }
- }
- ]
- }
-}
-DROP TABLE t1,t2;
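# A sketch, not part of the patch: the core of the removed MDEV-19255 check.
# The range condition on b1 is bounded by a (potentially expensive) subquery,
# so no rowid range filter should be built from the b1 index.  Assuming the
# t1/t2 tables and data defined above:
EXPLAIN FORMAT=JSON
SELECT * FROM t1 INNER JOIN t2 ON (pk1 + 1 = pk2 + 2 AND a1 = a2)
WHERE b1 <= (SELECT MAX(b2) FROM t2 WHERE pk2 <= 1);
# Expected, matching the JSON shown above: the t1 node uses "key": "a1" and
# carries no "rowid_filter" member.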
-#
-# MDEV-21794: Optimizer flag rowid_filter leads to long query
-#
-create table t10(a int);
-insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t11(a int);
-insert into t11 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
-CREATE TABLE t1 (
-el_id int(10) unsigned NOT NULL ,
-el_index blob NOT NULL,
-el_index_60 varbinary(60) NOT NULL,
-filler blob,
-PRIMARY KEY (el_id),
-KEY el_index (el_index(60)),
-KEY el_index_60 (el_index_60,el_id)
-);
-insert into t1
-select
-A.a+1000*B.a,
-A.a+1000*B.a + 10000,
-A.a+1000*B.a + 10000,
-'filler-data-filler-data'
-from
-t11 A, t10 B;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze Warning Engine-independent statistics are not collected for column 'el_index'
-test.t1 analyze Warning Engine-independent statistics are not collected for column 'filler'
-test.t1 analyze status Table is already up to date
-# This must not use rowid_filter with key=el_index|el_index_60:
-explain
-select * from t1
-where el_index like '10%' and (el_index_60 like '10%' or el_index_60 like '20%');
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range el_index,el_index_60 el_index 62 NULL 645 Using where
-drop table t10, t11, t1;
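# A sketch, not part of the patch: the guard from the removed MDEV-21794 test,
# restated.  el_index is a prefix key over a BLOB column, and choosing a rowid
# filter over el_index/el_index_60 is what led to the long-running query, so
# the expected plan is a plain range scan.  Assuming the t1 table and data
# built above:
EXPLAIN
SELECT * FROM t1
WHERE el_index LIKE '10%' AND (el_index_60 LIKE '10%' OR el_index_60 LIKE '20%');
# Expected type column: "range" on el_index alone, not "range|filter".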
-#
-# MDEV-22160: SIGSEGV in st_join_table::save_explain_data on SELECT
-#
-set @save_optimizer_switch= @@optimizer_switch;
-SET @@optimizer_switch="index_merge_sort_union=OFF";
-CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b));
-INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-ANALYZE table t1 PERSISTENT FOR ALL;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
-explain
-SELECT * FROM t1 WHERE a > 0 AND b=0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 77 (34%) Using index condition; Using where; Using rowid filter
-SELECT * FROM t1 WHERE a > 0 AND b=0;
-a b
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-drop table t1;
-SET @@optimizer_switch=@save_optimizer_switch;
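# A sketch, not part of the patch: the removed MDEV-22160 test only needed the
# crash scenario exercised: a range|filter plan chosen while
# index_merge_sort_union is disabled used to hit a SIGSEGV in
# st_join_table::save_explain_data.  A minimal rerun, assuming the t1 table
# and data created above:
SET @save_optimizer_switch = @@optimizer_switch;
SET optimizer_switch = 'index_merge_sort_union=off';
EXPLAIN SELECT * FROM t1 WHERE a > 0 AND b = 0;  # expected: range|filter, and no crash
SELECT * FROM t1 WHERE a > 0 AND b = 0;
SET optimizer_switch = @save_optimizer_switch;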
-#
-# MDEV-28846: Poor performance when rowid filter contains no elements
-#
-create table t1 (
-pk int primary key auto_increment,
-nm varchar(32),
-fl1 tinyint default 0,
-fl2 tinyint default 0,
-index idx1(nm, fl1),
-index idx2(fl2)
-) engine=myisam;
-create table name (
-pk int primary key auto_increment,
-nm bigint
-) engine=myisam;
-create table flag2 (
-pk int primary key auto_increment,
-fl2 tinyint
-) engine=myisam;
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select '500%' as a;
-a
-500%
-set optimizer_switch='rowid_filter=on';
-explain
-select * from t1 where nm like '500%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
-analyze format=json
-select * from t1 where nm like '500%' AND fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "range",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx1",
- "key_length": "35",
- "used_key_parts": ["nm"],
- "r_loops": 1,
- "rows": 1,
- "r_rows": 1,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 49.20000076,
- "r_filtered": 100,
- "index_condition": "t1.nm like '500%'",
- "attached_condition": "t1.fl2 = 0"
- }
- }
- ]
- }
-}
-select * from t1 where nm like '500%' AND fl2 = 0;
-pk nm fl1 fl2
-517 500 0 0
-truncate table name;
-truncate table flag2;
-truncate table t1;
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-set optimizer_switch='rowid_filter=off';
-explain
-select * from t1 where nm like '500%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
-analyze format=json
-select * from t1 where nm like '500%' AND fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "range",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx1",
- "key_length": "35",
- "used_key_parts": ["nm"],
- "r_loops": 1,
- "rows": 1,
- "r_rows": 1,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 49.20000076,
- "r_filtered": 100,
- "index_condition": "t1.nm like '500%'",
- "attached_condition": "t1.fl2 = 0"
- }
- }
- ]
- }
-}
-select * from t1 where nm like '500%' AND fl2 = 0;
-pk nm fl1 fl2
-517 500 0 0
-truncate table name;
-truncate table flag2;
-truncate table t1;
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select '607%' as a;
-a
-607%
-set optimizer_switch='rowid_filter=on';
-explain
-select * from t1 where nm like '607%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
-select * from t1 where nm like '607%' AND fl2 = 0;
-pk nm fl1 fl2
-721 607 0 0
-truncate table name;
-truncate table flag2;
-truncate table t1;
-insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
-insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select '75%' as a;
-a
-75%
-set optimizer_switch='rowid_filter=on';
-explain
-select * from t1 where nm like '75%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter
-analyze format=json
-select * from t1 where nm like '75%' AND fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx2",
- "key_length": "2",
- "used_key_parts": ["fl2"],
- "ref": ["const"],
- "rowid_filter": {
- "range": {
- "key": "idx1",
- "used_key_parts": ["nm"]
- },
- "rows": 115,
- "selectivity_pct": 1.15,
- "r_rows": 111,
- "r_lookups": 100,
- "r_selectivity_pct": 2,
- "r_buffer_size": "REPLACED",
- "r_filling_time_ms": "REPLACED"
- },
- "r_loops": 1,
- "rows": 55,
- "r_rows": 2,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 1.149999976,
- "r_filtered": 100,
- "attached_condition": "t1.nm like '75%'"
- }
- }
- ]
- }
-}
-select * from t1 where nm like '75%' AND fl2 = 0;
-pk nm fl1 fl2
-4543 7503 0 0
-7373 7518 0 0
-drop table name, flag2;
-drop table t1;
-create table t1 (
-pk int primary key auto_increment,
-nm char(255),
-fl1 tinyint default 0,
-fl2 int default 0,
-index idx1(nm, fl1),
-index idx2(fl2)
-) engine=myisam;
-create table name (
-pk int primary key auto_increment,
-nm bigint
-) engine=myisam;
-create table flag2 (
-pk int primary key auto_increment,
-fl2 int
-) engine=myisam;
-insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
-insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select * from t1
-where
-(
-nm like '3400%' or nm like '3402%' or nm like '3403%' or
-nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
-nm like '3409%' or
-nm like '3411%' or nm like '3412%' or nm like '3413%' or
-nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
-nm like '3418%' or nm like '3419%' or
-nm like '3421%' or nm like '3422%' or nm like '3423%' or
-nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
-nm like '3428%' or nm like '3429%' or
-nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
-nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
-nm like '3439%' or
-nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
-nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
-nm like '3448%'
-) and fl2 = 0;
-pk nm fl1 fl2
-analyze format=json select * from t1
-where
-(
-nm like '3400%' or nm like '3402%' or nm like '3403%' or
-nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
-nm like '3409%' or
-nm like '3411%' or nm like '3412%' or nm like '3413%' or
-nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
-nm like '3418%' or nm like '3419%' or
-nm like '3421%' or nm like '3422%' or nm like '3423%' or
-nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
-nm like '3428%' or nm like '3429%' or
-nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
-nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
-nm like '3439%' or
-nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
-nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
-nm like '3448%'
-) and fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx2",
- "key_length": "5",
- "used_key_parts": ["fl2"],
- "ref": ["const"],
- "rowid_filter": {
- "range": {
- "key": "idx1",
- "used_key_parts": ["nm"]
- },
- "rows": 44,
- "selectivity_pct": 0.44,
- "r_rows": 44,
- "r_lookups": 1000,
- "r_selectivity_pct": 0,
- "r_buffer_size": "REPLACED",
- "r_filling_time_ms": "REPLACED"
- },
- "r_loops": 1,
- "rows": 863,
- "r_rows": 0,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 0.439999998,
- "r_filtered": 100,
- "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'"
- }
- }
- ]
- }
-}
-create table t0 select * from t1 where nm like '34%';
-delete from t1 using t1,t0 where t1.nm=t0.nm;
-analyze format=json select * from t1
-where
-(
-nm like '3400%' or nm like '3402%' or nm like '3403%' or
-nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
-nm like '3409%' or
-nm like '3411%' or nm like '3412%' or nm like '3413%' or
-nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
-nm like '3418%' or nm like '3419%' or
-nm like '3421%' or nm like '3422%' or nm like '3423%' or
-nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
-nm like '3428%' or nm like '3429%' or
-nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
-nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
-nm like '3439%' or
-nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
-nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
-nm like '3448%'
-) and fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx2",
- "key_length": "5",
- "used_key_parts": ["fl2"],
- "ref": ["const"],
- "rowid_filter": {
- "range": {
- "key": "idx1",
- "used_key_parts": ["nm"]
- },
- "rows": 44,
- "selectivity_pct": 0.44,
- "r_rows": 0,
- "r_lookups": 0,
- "r_selectivity_pct": 0,
- "r_buffer_size": "REPLACED",
- "r_filling_time_ms": "REPLACED"
- },
- "r_loops": 1,
- "rows": 853,
- "r_rows": 0,
- "filtered": 0.439999998,
- "r_filtered": 100,
- "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'"
- }
- }
- ]
- }
-}
-drop table t0;
-set optimizer_switch='rowid_filter=default';
-drop table name, flag2;
-drop table t1;
-set @@use_stat_tables=@save_use_stat_tables;
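# A reading aid, not part of the patch: in the ANALYZE FORMAT=JSON outputs of
# the MDEV-28846 blocks above, the "rowid_filter" node pairs the optimizer
# estimate (rows, selectivity_pct) with runtime counters: r_rows is how many
# rowids were loaded into the container, r_lookups how many probes were made
# against it, and r_selectivity_pct the share of probes that passed.  The
# check can be repeated by hand, e.g. by re-running one of the statements
# above once the corresponding tables are in place:
ANALYZE FORMAT=JSON
SELECT * FROM t1 WHERE nm LIKE '75%' AND fl2 = 0;
# An empty container shows up as r_rows = 0 together with r_lookups = 0, which
# is what the last block before this point reports after the DELETE.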
diff --git a/mysql-test/main/rowid_filter.test b/mysql-test/main/rowid_filter.test
index a2543e197ca..c582a40523a 100644
--- a/mysql-test/main/rowid_filter.test
+++ b/mysql-test/main/rowid_filter.test
@@ -42,7 +42,7 @@ set statement optimizer_switch='rowid_filter=off' for;
select
100 *
(select count(*) from lineitem
- WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 45
+ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 47
)
/
(select count(*) from lineitem
@@ -52,9 +52,10 @@ select
let $q1=
SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
- l_quantity > 45;
+ l_quantity > 47;
eval $with_filter EXPLAIN $q1;
+--source include/explain-no-costs.inc
eval $with_filter EXPLAIN FORMAT=JSON $q1;
eval $with_filter ANALYZE $q1;
--source include/analyze-format.inc
@@ -63,6 +64,7 @@ eval $with_filter ANALYZE FORMAT=JSON $q1;
eval $with_filter $q1;
eval $without_filter EXPLAIN $q1;
+--source include/explain-no-costs.inc
eval $without_filter EXPLAIN FORMAT=JSON $q1;
eval $without_filter ANALYZE $q1;
--source include/analyze-format.inc
@@ -77,6 +79,7 @@ SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
o_totalprice between 200000 and 230000;
eval $with_filter EXPLAIN $q2;
+--source include/explain-no-costs.inc
eval $with_filter EXPLAIN FORMAT=JSON $q2;
eval $with_filter ANALYZE $q2;
--source include/analyze-format.inc
@@ -85,6 +88,7 @@ eval $with_filter ANALYZE FORMAT=JSON $q2;
eval $with_filter $q2;
eval $without_filter EXPLAIN $q2;
+--source include/explain-no-costs.inc
eval $without_filter EXPLAIN FORMAT=JSON $q2;
eval $without_filter ANALYZE $q2;
--source include/analyze-format.inc
@@ -96,10 +100,11 @@ let $q3=
SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
- l_quantity > 45 AND
+ l_quantity > 47 AND
o_totalprice between 180000 and 230000;
eval $with_filter EXPLAIN $q3;
+--source include/explain-no-costs.inc
eval $with_filter EXPLAIN FORMAT=JSON $q3;
eval $with_filter ANALYZE $q3;
--source include/analyze-format.inc
@@ -108,6 +113,7 @@ eval $with_filter ANALYZE FORMAT=JSON $q3;
eval $with_filter $q3;
eval $without_filter EXPLAIN $q3;
+--source include/explain-no-costs.inc
eval $without_filter EXPLAIN FORMAT=JSON $q3;
eval $without_filter ANALYZE $q3;
--source include/analyze-format.inc
@@ -115,6 +121,13 @@ eval $without_filter ANALYZE FORMAT=JSON $q3;
--sorted_result
eval $without_filter $q3;
+# Check different optimization
+eval $with_filter EXPLAIN SELECT STRAIGHT_JOIN o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+ FROM lineitem JOIN orders ON o_orderkey=l_orderkey
+ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+ l_quantity > 47 AND
+ o_totalprice between 180000 and 230000;
+
let $q4=
SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
@@ -122,6 +135,7 @@ SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
o_totalprice between 200000 and 230000;
eval $with_filter EXPLAIN $q4;
+--source include/explain-no-costs.inc
eval $with_filter EXPLAIN FORMAT=JSON $q4;
eval $with_filter ANALYZE $q4;
--source include/analyze-format.inc
@@ -130,6 +144,7 @@ eval $with_filter ANALYZE FORMAT=JSON $q4;
eval $with_filter $q4;
eval $without_filter EXPLAIN $q4;
+--source include/explain-no-costs.inc
eval $without_filter EXPLAIN FORMAT=JSON $q4;
eval $without_filter ANALYZE $q4;
--source include/analyze-format.inc
@@ -155,6 +170,7 @@ WHERE o_orderkey=l_orderkey AND
o_totalprice BETWEEN 200000 AND 250000;
eval $with_filter EXPLAIN $q5;
+--source include/explain-no-costs.inc
eval $with_filter EXPLAIN FORMAT=JSON $q5;
eval $with_filter ANALYZE $q5;
--source include/analyze-format.inc
@@ -163,6 +179,7 @@ eval $with_filter ANALYZE FORMAT=JSON $q5;
eval $with_filter $q5;
eval $without_filter EXPLAIN $q5;
+--source include/explain-no-costs.inc
eval $without_filter EXPLAIN FORMAT=JSON $q5;
eval $without_filter ANALYZE $q5;
--source include/analyze-format.inc
@@ -188,6 +205,7 @@ WHERE o_orderkey=l_orderkey AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
eval $with_filter EXPLAIN $q6;
+--source include/explain-no-costs.inc
eval $with_filter EXPLAIN FORMAT=JSON $q6;
eval $with_filter ANALYZE $q6;
--source include/analyze-format.inc
@@ -196,6 +214,7 @@ eval $with_filter ANALYZE FORMAT=JSON $q6;
eval $with_filter $q6;
eval $without_filter EXPLAIN $q6;
+--source include/explain-no-costs.inc
eval $without_filter EXPLAIN FORMAT=JSON $q6;
eval $without_filter ANALYZE $q6;
--source include/analyze-format.inc
@@ -216,21 +235,21 @@ WHERE o_orderkey=l_orderkey AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
eval $with_filter EXPLAIN $q7;
---replace_regex /"filtered": [0-9e\.\-+]*,/"filtered": "REPLACED",/
+--source include/explain-no-costs-filtered.inc
eval $with_filter EXPLAIN FORMAT=JSON $q7;
--replace_column 11 #
eval $with_filter ANALYZE $q7;
---replace_regex /("(r_(total|table|other)_time_ms|r_buffer_size|r_filling_time_ms|filtered)": )[^, \n]*/\1"REPLACED"/
+--source include/analyze-no-filtered.inc
eval $with_filter ANALYZE FORMAT=JSON $q7;
--sorted_result
eval $with_filter $q7;
eval $without_filter EXPLAIN $q7;
---replace_regex /"filtered": [0-9e\.\-+]*,/"filtered": "REPLACED",/
+--source include/explain-no-costs-filtered.inc
eval $without_filter EXPLAIN FORMAT=JSON $q7;
--replace_column 11 #
eval $without_filter ANALYZE $q7;
---replace_regex /("(r_(total|table|other)_time_ms|r_buffer_size|r_filling_time_ms|filtered)": )[^, \n]*/\1"REPLACED"/
+--source include/analyze-no-filtered.inc
eval $without_filter ANALYZE FORMAT=JSON $q7;
--sorted_result
eval $without_filter $q7;
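# A note, not part of the patch: the hunk above swaps per-test --replace_regex
# masking for shared include files.  Going by the regex lines being removed and
# by the "COST_REPLACED" placeholders in the new Aria result file below, such
# an include presumably amounts to a single substitution along these lines
# (the cost pattern here is a guess; the filtered one is copied from the
# removed line):
--replace_regex /"cost": [0-9e\.\-+]*/"cost": "COST_REPLACED"/ /"filtered": [0-9e\.\-+]*,/"filtered": "REPLACED",/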
@@ -243,371 +262,3 @@ DROP VIEW v1;
DROP DATABASE dbt3_s001;
use test;
-
---echo #
---echo # MDEV-18816: potential range filter for one join table with
---echo # impossible WHERE for another
---echo #
-
-create table t1 (
- pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
-) engine=myisam;
-insert into t1 values (1,'a',-5),(2,'a',null);
-
-create table t2 (
- pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
-) engine=myisam;
-insert into t2 values
- (1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
- (7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
- (13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
-
-let $q=
-select 1
- from t1
- left join
- t2 join t1 as t1_a on t2.i1 = t1_a.pk
- on t1.c2 = t2.c1
-where t1_a.pk is null and t1_a.i1 != 3;
-
-eval $q;
-eval explain extended $q;
-
-drop table t1,t2;
-
---echo #
---echo # MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
---echo # move depends on uninitialized value
---echo #
-
-CREATE TABLE t1 (
- pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
-
-SELECT * FROM t1 WHERE pk < 5;
-
-DROP TABLE t1;
-
---echo #
---echo # MDEV-18956: Possible rowid filter for subquery for which
---echo # in_to_exists strategy has been chosen
---echo #
-
-CREATE TABLE t1 (pk int) engine=myisam ;
-INSERT INTO t1 VALUES (1),(2);
-
-CREATE TABLE t2 (
- pk int auto_increment PRIMARY KEY,
- i1 int, i2 int, c2 varchar(1),
- KEY (i1), KEY (i2)
-) engine=myisam;
-
-INSERT INTO t2 VALUES
- (1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
- (6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
- (11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
-
-SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
-EXPLAIN EXTENDED
-SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
-
-DROP TABLE t1,t2;
-
---echo #
---echo # MDEV-19255: rowid range filter built for range condition
---echo # that uses an expensive subquery
---echo #
-
-CREATE TABLE t1 (
- pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES
-(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
-(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
-(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
-(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
-(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
-(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
-(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
-(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
-(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
-(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
-(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
-(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
-(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
-(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
-(107,8,'z'),(108,3,'k'),(109,65,NULL);
-
-CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
-INSERT INTO t2 VALUES (1,1,'i');
-INSERT INTO t2 SELECT * FROM t1;
-
-INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1;
-INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1;
-
-ANALYZE TABLE t1,t2 PERSISTENT FOR ALL;
-
-let $q=
-SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
- WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-
-eval $q;
-eval EXPLAIN EXTENDED $q;
-eval EXPLAIN FORMAT=JSON $q;
-
-DROP TABLE t1,t2;
-
---echo #
---echo # MDEV-21794: Optimizer flag rowid_filter leads to long query
---echo #
-create table t10(a int);
-insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-
-create table t11(a int);
-insert into t11 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
-
-CREATE TABLE t1 (
- el_id int(10) unsigned NOT NULL ,
- el_index blob NOT NULL,
- el_index_60 varbinary(60) NOT NULL,
- filler blob,
-
- PRIMARY KEY (el_id),
- KEY el_index (el_index(60)),
- KEY el_index_60 (el_index_60,el_id)
-);
-
-insert into t1
-select
- A.a+1000*B.a,
- A.a+1000*B.a + 10000,
- A.a+1000*B.a + 10000,
- 'filler-data-filler-data'
-from
- t11 A, t10 B;
-analyze table t1 persistent for all;
-
---echo # This must not use rowid_filter with key=el_index|el_index_60:
-explain
-select * from t1
-where el_index like '10%' and (el_index_60 like '10%' or el_index_60 like '20%');
-
-drop table t10, t11, t1;
-
-
---echo #
---echo # MDEV-22160: SIGSEGV in st_join_table::save_explain_data on SELECT
---echo #
-
-set @save_optimizer_switch= @@optimizer_switch;
-SET @@optimizer_switch="index_merge_sort_union=OFF";
-CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b));
-INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-
-ANALYZE table t1 PERSISTENT FOR ALL;
-
-explain
-SELECT * FROM t1 WHERE a > 0 AND b=0;
-SELECT * FROM t1 WHERE a > 0 AND b=0;
-drop table t1;
-SET @@optimizer_switch=@save_optimizer_switch;
-
-
---echo #
---echo # MDEV-28846: Poor performance when rowid filter contains no elements
---echo #
-
---source include/have_sequence.inc
-
-create table t1 (
- pk int primary key auto_increment,
- nm varchar(32),
- fl1 tinyint default 0,
- fl2 tinyint default 0,
- index idx1(nm, fl1),
- index idx2(fl2)
-) engine=myisam;
-
-create table name (
- pk int primary key auto_increment,
- nm bigint
-) engine=myisam;
-
-create table flag2 (
- pk int primary key auto_increment,
- fl2 tinyint
-) engine=myisam;
-
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
-
-insert into t1(nm,fl2)
- select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-
-analyze table t1 persistent for all;
-
-let $a=
-`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`;
-eval select '$a' as a;
-
-set optimizer_switch='rowid_filter=on';
-eval
-explain
-select * from t1 where nm like '$a' AND fl2 = 0;
---source include/analyze-format.inc
-eval
-analyze format=json
-select * from t1 where nm like '$a' AND fl2 = 0;
-eval
-select * from t1 where nm like '$a' AND fl2 = 0;
-
-truncate table name;
-truncate table flag2;
-truncate table t1;
-
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
-
-insert into t1(nm,fl2)
- select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-
-analyze table t1 persistent for all;
-
-set optimizer_switch='rowid_filter=off';
-eval
-explain
-select * from t1 where nm like '$a' AND fl2 = 0;
---source include/analyze-format.inc
-eval
-analyze format=json
-select * from t1 where nm like '$a' AND fl2 = 0;
-eval
-select * from t1 where nm like '$a' AND fl2 = 0;
-
-truncate table name;
-truncate table flag2;
-truncate table t1;
-
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19);
-
-insert into t1(nm,fl2)
- select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-
-analyze table t1 persistent for all;
-
-let $a=
-`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`;
-eval select '$a' as a;
-
-set optimizer_switch='rowid_filter=on';
-eval
-explain
-select * from t1 where nm like '$a' AND fl2 = 0;
-eval
-select * from t1 where nm like '$a' AND fl2 = 0;
-
-truncate table name;
-truncate table flag2;
-truncate table t1;
-
-insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
-insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19);
-
-insert into t1(nm,fl2)
- select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-
-analyze table t1 persistent for all;
-
-let $a=
-`select concat(left((select nm from t1 where fl2=0 order by RAND(13) limit 1),2),'%')`;
-eval select '$a' as a;
-
-set optimizer_switch='rowid_filter=on';
-eval
-explain
-select * from t1 where nm like '$a' AND fl2 = 0;
---source include/analyze-format.inc
-eval
-analyze format=json
-select * from t1 where nm like '$a' AND fl2 = 0;
-eval
-select * from t1 where nm like '$a' AND fl2 = 0;
-
-drop table name, flag2;
-drop table t1;
-
-# This test shows that if the container is empty there are no lookups into it
-
-create table t1 (
- pk int primary key auto_increment,
- nm char(255),
- fl1 tinyint default 0,
- fl2 int default 0,
- index idx1(nm, fl1),
- index idx2(fl2)
-) engine=myisam;
-
-create table name (
- pk int primary key auto_increment,
- nm bigint
-) engine=myisam;
-
-create table flag2 (
- pk int primary key auto_increment,
- fl2 int
-) engine=myisam;
-
-insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
-insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19);
-
-insert into t1(nm,fl2)
- select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-
-analyze table t1 persistent for all;
-
-let $q=
-select * from t1
-where
-(
- nm like '3400%' or nm like '3402%' or nm like '3403%' or
- nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
- nm like '3409%' or
- nm like '3411%' or nm like '3412%' or nm like '3413%' or
- nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
- nm like '3418%' or nm like '3419%' or
- nm like '3421%' or nm like '3422%' or nm like '3423%' or
- nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
- nm like '3428%' or nm like '3429%' or
- nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
- nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
- nm like '3439%' or
- nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
- nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
- nm like '3448%'
-) and fl2 = 0;
-
-eval $q;
---source include/analyze-format.inc
-eval analyze format=json $q;
-
-create table t0 select * from t1 where nm like '34%';
-delete from t1 using t1,t0 where t1.nm=t0.nm;
---source include/analyze-format.inc
-eval analyze format=json $q;
-
-drop table t0;
-
-set optimizer_switch='rowid_filter=default';
-
-drop table name, flag2;
-drop table t1;
-
-set @@use_stat_tables=@save_use_stat_tables;
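# A sketch, not part of the patch: the "empty container" step of the removed
# test, condensed.  With the tables from the block above still in place, every
# row matching the LIKE ranges is deleted first, so the rowid filter is built
# empty and the same ANALYZE is expected to report no lookups into it:
create table t0 select * from t1 where nm like '34%';
delete from t1 using t1, t0 where t1.nm = t0.nm;
# re-run the ANALYZE FORMAT=JSON statement with the full OR-list of LIKE
# patterns shown above; its rowid_filter node should now show
#   "r_rows": 0  and  "r_lookups": 0
drop table t0;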
diff --git a/mysql-test/main/rowid_filter_aria.result b/mysql-test/main/rowid_filter_aria.result
new file mode 100644
index 00000000000..b7a3a29ab9c
--- /dev/null
+++ b/mysql-test/main/rowid_filter_aria.result
@@ -0,0 +1,2245 @@
+SET SESSION DEFAULT_STORAGE_ENGINE='Aria';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+CREATE INDEX i_l_quantity ON lineitem(l_quantity);
+CREATE INDEX i_o_totalprice ON orders(o_totalprice);
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables=preferably;
+ANALYZE TABLE lineitem, orders;
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT 0,
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT 0,
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`),
+ KEY `i_l_quantity` (`l_quantity`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1
+show create table orders;
+Table Create Table
+orders CREATE TABLE `orders` (
+ `o_orderkey` int(11) NOT NULL,
+ `o_custkey` int(11) DEFAULT NULL,
+ `o_orderstatus` char(1) DEFAULT NULL,
+ `o_totalprice` double DEFAULT NULL,
+ `o_orderDATE` date DEFAULT NULL,
+ `o_orderpriority` char(15) DEFAULT NULL,
+ `o_clerk` char(15) DEFAULT NULL,
+ `o_shippriority` int(11) DEFAULT NULL,
+ `o_comment` varchar(79) DEFAULT NULL,
+ PRIMARY KEY (`o_orderkey`),
+ KEY `i_o_orderdate` (`o_orderDATE`),
+ KEY `i_o_custkey` (`o_custkey`),
+ KEY `i_o_totalprice` (`o_totalprice`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1
+set optimizer_use_condition_selectivity=2;
+select
+100 *
+(select count(*) from lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 47
+)
+/
+(select count(*) from lineitem
+where l_shipdate BETWEEN '1997-01-01' AND '1997-06-30')
+as correct_r_filtered_when_using_l_shipdate;
+correct_r_filtered_when_using_l_shipdate
+6.6667
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 482 (12%) Using index condition; Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 709,
+ "selectivity_pct": 11.80682764
+ },
+ "loops": 1,
+ "rows": 482,
+ "cost": "COST_REPLACED",
+ "filtered": 11.80682755,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 482 (12%) 34.00 (6%) 11.81 100.00 Using index condition; Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 709,
+ "selectivity_pct": 11.80682764,
+ "r_rows": 349,
+ "r_lookups": 510,
+ "r_selectivity_pct": 6.666666667,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 482,
+ "r_rows": 34,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 11.80682755,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+l_orderkey l_linenumber l_shipdate l_quantity
+1121 6 1997-04-21 50
+1441 7 1997-06-07 50
+1473 1 1997-05-05 50
+1954 7 1997-06-04 49
+2151 3 1997-01-20 49
+2469 3 1997-01-11 48
+2469 6 1997-03-03 49
+2470 2 1997-06-02 50
+260 1 1997-03-24 50
+288 2 1997-04-19 49
+289 4 1997-03-14 48
+3009 1 1997-03-19 48
+3105 3 1997-02-28 48
+3106 2 1997-02-27 49
+3429 1 1997-04-08 48
+3490 2 1997-06-27 50
+3619 1 1997-01-22 49
+4005 4 1997-01-31 49
+4066 4 1997-02-17 49
+4453 3 1997-05-29 48
+4484 7 1997-03-17 50
+484 1 1997-03-06 49
+484 3 1997-01-24 50
+484 5 1997-03-05 48
+485 1 1997-03-28 50
+4868 3 1997-04-23 49
+4934 1 1997-05-20 48
+4967 1 1997-05-27 50
+5152 2 1997-03-10 50
+5158 4 1997-04-10 49
+581 3 1997-02-27 49
+5829 5 1997-01-31 49
+5895 3 1997-03-15 49
+5952 1 1997-06-30 49
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 482 Using index condition; Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "rows": 482,
+ "cost": "COST_REPLACED",
+ "filtered": 11.80682755,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 482 510.00 11.81 6.67 Using index condition; Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 482,
+ "r_rows": 510,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 11.80682755,
+ "r_filtered": 6.666666667,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47;
+l_orderkey l_linenumber l_shipdate l_quantity
+1121 6 1997-04-21 50
+1441 7 1997-06-07 50
+1473 1 1997-05-05 50
+1954 7 1997-06-04 49
+2151 3 1997-01-20 49
+2469 3 1997-01-11 48
+2469 6 1997-03-03 49
+2470 2 1997-06-02 50
+260 1 1997-03-24 50
+288 2 1997-04-19 49
+289 4 1997-03-14 48
+3009 1 1997-03-19 48
+3105 3 1997-02-28 48
+3106 2 1997-02-27 49
+3429 1 1997-04-08 48
+3490 2 1997-06-27 50
+3619 1 1997-01-22 49
+4005 4 1997-01-31 49
+4066 4 1997-02-17 49
+4453 3 1997-05-29 48
+4484 7 1997-03-17 50
+484 1 1997-03-06 49
+484 3 1997-01-24 50
+484 5 1997-03-05 48
+485 1 1997-03-28 50
+4868 3 1997-04-23 49
+4934 1 1997-05-20 48
+4967 1 1997-05-27 50
+5152 2 1997-03-10 50
+5158 4 1997-04-10 49
+581 3 1997-02-27 49
+5829 5 1997-01-31 49
+5895 3 1997-03-15 49
+5952 1 1997-06-30 49
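# A reading note, not part of the patch: comparing the two runs above on the
# same dbt3_s001 data, the filtered plan reads only the 34 rows of the
# i_l_shipdate range that also pass the l_quantity filter (r_filtered 100%),
# while the unfiltered plan reads all 510 range rows and discards about 93% of
# them afterwards (r_filtered 6.67%).  The per-statement form used throughout
# this file is the convenient way to flip the switch for a single query:
set statement optimizer_switch='rowid_filter=off' for
EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 47;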
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 92 Using index condition
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (7%) Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "rows": 92,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_o_totalprice",
+ "used_key_parts": ["o_totalprice"]
+ },
+ "rows": 106,
+ "selectivity_pct": 7.066666667
+ },
+ "loops": 92,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 7.066666603,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 92 98.00 100.00 100.00 Using index condition
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (7%) 0.11 (10%) 7.07 100.00 Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 92,
+ "r_rows": 98,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_o_totalprice",
+ "used_key_parts": ["o_totalprice"]
+ },
+ "rows": 106,
+ "selectivity_pct": 7.066666667,
+ "r_rows": 71,
+ "r_lookups": 96,
+ "r_selectivity_pct": 10.41666667,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "loops": 92,
+ "r_loops": 98,
+ "r_table_loops": 96,
+ "rows": 1,
+ "r_rows": 0.112244898,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 7.066666603,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 6 1997-01-25 222274.54
+484 3 1997-01-24 219920.62
+5606 6 1997-01-11 219959.08
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 92 Using index condition
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "rows": 92,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 92,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 7.066666603,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 92 98.00 100.00 100.00 Using index condition
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 7.07 11.22 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 92,
+ "r_rows": 98,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 92,
+ "r_loops": 98,
+ "r_table_loops": 96,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 7.066666603,
+ "r_filtered": 11.2244898,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 6 1997-01-25 222274.54
+484 3 1997-01-24 219920.62
+5606 6 1997-01-11 219959.08
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 482 (12%) Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 709,
+ "selectivity_pct": 11.80682764
+ },
+ "loops": 1,
+ "rows": 482,
+ "cost": "COST_REPLACED",
+ "filtered": 11.80682755,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 56.90890924,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 14.39999962,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 482 (12%) 34.00 (6%) 11.81 100.00 Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 14.40 26.47 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 709,
+ "selectivity_pct": 11.80682764,
+ "r_rows": 349,
+ "r_lookups": 510,
+ "r_selectivity_pct": 6.666666667,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 482,
+ "r_rows": 34,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 11.80682755,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 56.90890924,
+ "r_loops": 34,
+ "r_table_loops": 33,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 14.39999962,
+ "r_filtered": 26.47058824,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
+2469 3 1997-01-11 48 192074.23
+2469 6 1997-03-03 49 192074.23
+3619 1 1997-01-22 49 222274.54
+484 1 1997-03-06 49 219920.62
+484 3 1997-01-24 50 219920.62
+484 5 1997-03-05 48 219920.62
+4934 1 1997-05-20 48 180478.16
+5829 5 1997-01-31 49 183734.56
+5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 482 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "rows": 482,
+ "cost": "COST_REPLACED",
+ "filtered": 11.80682755,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 56.90890924,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 14.39999962,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 482 510.00 11.81 6.67 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 14.40 26.47 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 482,
+ "r_rows": 510,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 11.80682755,
+ "r_filtered": 6.666666667,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 47"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 56.90890924,
+ "r_loops": 34,
+ "r_table_loops": 33,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 14.39999962,
+ "r_filtered": 26.47058824,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
+2469 3 1997-01-11 48 192074.23
+2469 6 1997-03-03 49 192074.23
+3619 1 1997-01-22 49 222274.54
+484 1 1997-03-06 49 219920.62
+484 3 1997-01-24 50 219920.62
+484 5 1997-03-05 48 219920.62
+4934 1 1997-05-20 48 180478.16
+5829 5 1997-01-31 49 183734.56
+5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT STRAIGHT_JOIN o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM lineitem JOIN orders ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 482 (12%) Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 106 Using index condition
+1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "loops": 1,
+ "rows": 106,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_orderkey",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
+ },
+ "rows": 482,
+ "selectivity_pct": 8.026644463
+ },
+ "loops": 106,
+ "rows": 4,
+ "cost": "COST_REPLACED",
+ "filtered": 8.026644707,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 106 71.00 100.00 100.00 Using index condition
+1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.03 100.00 Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 106,
+ "r_rows": 71,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_orderkey",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
+ },
+ "rows": 482,
+ "selectivity_pct": 8.026644463,
+ "r_rows": 510,
+ "r_lookups": 476,
+ "r_selectivity_pct": 7.773109244,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "loops": 106,
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 0.521126761,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 8.026644707,
+ "r_filtered": 100,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+1890 1 1997-04-02 202364.58
+1890 3 1997-02-09 202364.58
+1890 4 1997-04-08 202364.58
+1890 5 1997-04-15 202364.58
+1890 6 1997-02-13 202364.58
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 4 1997-03-18 222274.54
+3619 6 1997-01-25 222274.54
+453 1 1997-06-30 216826.73
+453 2 1997-06-30 216826.73
+484 1 1997-03-06 219920.62
+484 2 1997-04-09 219920.62
+484 3 1997-01-24 219920.62
+484 4 1997-04-29 219920.62
+484 5 1997-03-05 219920.62
+484 6 1997-04-06 219920.62
+5606 2 1997-02-23 219959.08
+5606 3 1997-03-11 219959.08
+5606 4 1997-02-06 219959.08
+5606 6 1997-01-11 219959.08
+5606 7 1997-02-01 219959.08
+5859 2 1997-05-15 210643.96
+5859 5 1997-05-28 210643.96
+5859 6 1997-06-15 210643.96
+5895 1 1997-04-05 201419.83
+5895 2 1997-04-27 201419.83
+5895 3 1997-03-15 201419.83
+5895 4 1997-03-03 201419.83
+5895 5 1997-04-30 201419.83
+5895 6 1997-04-19 201419.83
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 106 Using index condition
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "loops": 1,
+ "rows": 106,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_orderkey",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 106,
+ "rows": 4,
+ "cost": "COST_REPLACED",
+ "filtered": 8.026644707,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 106 71.00 100.00 100.00 Using index condition
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 6.70 8.03 7.77 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 106,
+ "r_rows": 71,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_orderkey",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 106,
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 6.704225352,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 8.026644707,
+ "r_filtered": 7.773109244,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+1890 1 1997-04-02 202364.58
+1890 3 1997-02-09 202364.58
+1890 4 1997-04-08 202364.58
+1890 5 1997-04-15 202364.58
+1890 6 1997-02-13 202364.58
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 4 1997-03-18 222274.54
+3619 6 1997-01-25 222274.54
+453 1 1997-06-30 216826.73
+453 2 1997-06-30 216826.73
+484 1 1997-03-06 219920.62
+484 2 1997-04-09 219920.62
+484 3 1997-01-24 219920.62
+484 4 1997-04-29 219920.62
+484 5 1997-03-05 219920.62
+484 6 1997-04-06 219920.62
+5606 2 1997-02-23 219959.08
+5606 3 1997-03-11 219959.08
+5606 4 1997-02-06 219959.08
+5606 6 1997-01-11 219959.08
+5606 7 1997-02-01 219959.08
+5859 2 1997-05-15 210643.96
+5859 5 1997-05-28 210643.96
+5859 6 1997-06-15 210643.96
+5895 1 1997-04-05 201419.83
+5895 2 1997-04-27 201419.83
+5895 3 1997-03-15 201419.83
+5895 4 1997-03-03 201419.83
+5895 5 1997-04-30 201419.83
+5895 6 1997-04-19 201419.83
+#
+# MDEV-18413: find constraint correlated indexes
+#
+ALTER TABLE lineitem ADD CONSTRAINT l_date CHECK(l_shipdate < l_receiptdate);
+# Filter on l_shipdate is not used because it participates in
+# the same constraint as l_receiptdate.
+# Access is made on l_receiptdate.
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 17 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
+ "rows": 17,
+ "cost": "COST_REPLACED",
+ "filtered": 0.532889247,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 8.466666222,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 17 18.00 0.53 38.89 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 8.47 14.29 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 17,
+ "r_rows": 18,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 0.532889247,
+ "r_filtered": 38.88888889,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
+ "r_loops": 7,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 8.466666222,
+ "r_filtered": 14.28571429,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+l_shipdate l_receiptdate o_totalprice
+1996-10-07 1996-10-08 202623.92
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 17 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
+ "rows": 17,
+ "cost": "COST_REPLACED",
+ "filtered": 0.532889247,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 8.466666222,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 17 18.00 0.53 38.89 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 8.47 14.29 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 17,
+ "r_rows": 18,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 0.532889247,
+ "r_filtered": 38.88888889,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
+ "r_loops": 7,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 8.466666222,
+ "r_filtered": 14.28571429,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+l_shipdate l_receiptdate o_totalprice
+1996-10-07 1996-10-08 202623.92
+ALTER TABLE orders ADD COLUMN o_totaldiscount double;
+UPDATE orders SET o_totaldiscount = o_totalprice*(o_custkey/1000);
+CREATE INDEX i_o_totaldiscount on orders(o_totaldiscount);
+ALTER TABLE orders ADD CONSTRAINT o_price CHECK(o_totalprice > o_totaldiscount);
+# Filter on o_totalprice is not used because it participates in
+# the same constraint as o_totaldiscount.
+# Access is made on o_totaldiscount.
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "rows": 61,
+ "cost": "COST_REPLACED",
+ "filtered": 5,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 3.05,
+ "rows": 4,
+ "cost": "COST_REPLACED",
+ "filtered": 2.897585392,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 41.00 5.00 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 2.90 66.67 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 61,
+ "r_rows": 41,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 5,
+ "r_filtered": 2.43902439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 3.05,
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 2.897585392,
+ "r_filtered": 66.66666667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "rows": 61,
+ "cost": "COST_REPLACED",
+ "filtered": 5,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 3.05,
+ "rows": 4,
+ "cost": "COST_REPLACED",
+ "filtered": 2.897585392,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 41.00 5.00 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 2.90 66.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 61,
+ "r_rows": 41,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 5,
+ "r_filtered": 2.43902439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 3.05,
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 2.897585392,
+ "r_filtered": 66.66666667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+CREATE VIEW v1 AS
+SELECT * FROM orders
+WHERE o_orderdate BETWEEN '1992-12-01' AND '1997-01-01';
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "rows": 61,
+ "cost": "REPLACED",
+ "filtered": "REPLACED",
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.779166667,
+ "rows": 4,
+ "cost": "REPLACED",
+ "filtered": "REPLACED",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 41.00 # 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 # 66.67 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 61,
+ "r_rows": 41,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": "REPLACED",
+ "r_filtered": 2.43902439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.779166667,
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": "REPLACED",
+ "r_filtered": 66.66666667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "rows": 61,
+ "cost": "REPLACED",
+ "filtered": "REPLACED",
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.779166667,
+ "rows": 4,
+ "cost": "REPLACED",
+ "filtered": "REPLACED",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 61 41.00 # 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 # 66.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 61,
+ "r_rows": 41,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": "REPLACED",
+ "r_filtered": 2.43902439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ }
+ },
+ {
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.779166667,
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": "REPLACED",
+ "r_filtered": 66.66666667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+ ]
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+ALTER TABLE lineitem DROP CONSTRAINT l_date;
+ALTER TABLE orders DROP CONSTRAINT o_price;
+ALTER TABLE orders DROP COLUMN o_totaldiscount;
+DROP VIEW v1;
+DROP DATABASE dbt3_s001;
+use test;
diff --git a/mysql-test/main/rowid_filter_aria.test b/mysql-test/main/rowid_filter_aria.test
new file mode 100644
index 00000000000..869d398f791
--- /dev/null
+++ b/mysql-test/main/rowid_filter_aria.test
@@ -0,0 +1,9 @@
+#
+# Test rowid filters with Aria
+#
+
+SET SESSION DEFAULT_STORAGE_ENGINE='Aria';
+
+#set global aria.optimizer_rowid_compare_cost=0.00001;
+#set global aria.optimizer_rowid_copy_cost=0.00001;
+--source rowid_filter.test
diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result
index e10ddd6658b..da6c848e334 100644
--- a/mysql-test/main/rowid_filter_innodb.result
+++ b/mysql-test/main/rowid_filter_innodb.result
@@ -60,47 +60,50 @@ set optimizer_use_condition_selectivity=2;
select
100 *
(select count(*) from lineitem
-WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 45
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND l_quantity > 47
)
/
(select count(*) from lineitem
where l_shipdate BETWEEN '1997-01-01' AND '1997-06-30')
as correct_r_filtered_when_using_l_shipdate;
correct_r_filtered_when_using_l_shipdate
-11.7647
+6.6667
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 510 (10%) Using index condition; Using where; Using rowid filter
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 349 (8%) Using index condition; Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 605,
- "selectivity_pct": 10.07493755
+ "rows": 510,
+ "selectivity_pct": 8.492922565
},
- "rows": 510,
- "filtered": 10.07493782,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "loops": 1,
+ "rows": 349,
+ "cost": "COST_REPLACED",
+ "filtered": 8.492922783,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -108,12 +111,12 @@ EXPLAIN
}
set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 510 (10%) 60.00 (11%) 10.07 100.00 Using index condition; Using where; Using rowid filter
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 349 (8%) 34.00 (9%) 8.49 100.00 Using index condition; Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
ANALYZE
{
"query_optimization": {
@@ -121,6 +124,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -129,31 +133,33 @@ ANALYZE
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 605,
- "selectivity_pct": 10.07493755,
- "r_rows": 605,
- "r_lookups": 510,
- "r_selectivity_pct": 11.76470588,
+ "rows": 510,
+ "selectivity_pct": 8.492922565,
+ "r_rows": 510,
+ "r_lookups": 349,
+ "r_selectivity_pct": 9.742120344,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
+ "loops": 1,
"r_loops": 1,
- "rows": 510,
- "r_rows": 60,
+ "rows": 349,
+ "r_rows": 34,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 10.07493782,
+ "filtered": 8.492922783,
"r_filtered": 100,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -161,21 +167,13 @@ ANALYZE
}
set statement optimizer_switch='rowid_filter=on' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
l_orderkey l_linenumber l_shipdate l_quantity
-1121 5 1997-04-27 47
1121 6 1997-04-21 50
1441 7 1997-06-07 50
-1443 1 1997-02-05 47
1473 1 1997-05-05 50
-1568 2 1997-04-06 46
-1632 1 1997-01-25 47
-1632 3 1997-01-29 47
1954 7 1997-06-04 49
-1959 1 1997-05-05 46
2151 3 1997-01-20 49
-2177 5 1997-05-10 46
-2369 2 1997-01-02 47
2469 3 1997-01-11 48
2469 6 1997-03-03 49
2470 2 1997-06-02 50
@@ -188,66 +186,51 @@ l_orderkey l_linenumber l_shipdate l_quantity
3429 1 1997-04-08 48
3490 2 1997-06-27 50
3619 1 1997-01-22 49
-3619 3 1997-01-31 46
-3969 3 1997-05-29 46
4005 4 1997-01-31 49
-4036 1 1997-06-21 46
4066 4 1997-02-17 49
-4098 1 1997-01-26 46
-422 3 1997-06-21 46
-4258 3 1997-01-02 46
-4421 2 1997-04-21 46
-4421 3 1997-05-25 46
4453 3 1997-05-29 48
4484 7 1997-03-17 50
-4609 3 1997-02-11 46
484 1 1997-03-06 49
484 3 1997-01-24 50
484 5 1997-03-05 48
485 1 1997-03-28 50
-4868 1 1997-04-29 47
4868 3 1997-04-23 49
4934 1 1997-05-20 48
4967 1 1997-05-27 50
-5090 2 1997-04-05 46
5152 2 1997-03-10 50
5158 4 1997-04-10 49
-5606 3 1997-03-11 46
-5606 7 1997-02-01 46
-5762 4 1997-03-02 47
581 3 1997-02-27 49
5829 5 1997-01-31 49
-5831 4 1997-02-24 46
-5895 2 1997-04-27 47
5895 3 1997-03-15 49
5952 1 1997-06-30 49
-705 1 1997-04-18 46
-836 3 1997-03-21 46
set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 510 Using index condition; Using where
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_quantity 9 NULL 349 Using index condition; Using where
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
- "rows": 510,
- "filtered": 10.07493782,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
+ "loops": 1,
+ "rows": 349,
+ "cost": "COST_REPLACED",
+ "filtered": 8.492922783,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -255,12 +238,12 @@ EXPLAIN
}
set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 510 510.00 10.07 11.76 Using index condition; Using where
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_quantity 9 NULL 349 349.00 8.49 9.74 Using index condition; Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
ANALYZE
{
"query_optimization": {
@@ -268,6 +251,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -276,18 +260,20 @@ ANALYZE
"table_name": "lineitem",
"access_type": "range",
"possible_keys": ["i_l_shipdate", "i_l_quantity"],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
+ "loops": 1,
"r_loops": 1,
- "rows": 510,
- "r_rows": 510,
+ "rows": 349,
+ "r_rows": 349,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 10.07493782,
- "r_filtered": 11.76470588,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "filtered": 8.492922783,
+ "r_filtered": 9.742120344,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
}
]
@@ -295,21 +281,13 @@ ANALYZE
}
set statement optimizer_switch='rowid_filter=off' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45;
+l_quantity > 47;
l_orderkey l_linenumber l_shipdate l_quantity
-1121 5 1997-04-27 47
1121 6 1997-04-21 50
1441 7 1997-06-07 50
-1443 1 1997-02-05 47
1473 1 1997-05-05 50
-1568 2 1997-04-06 46
-1632 1 1997-01-25 47
-1632 3 1997-01-29 47
1954 7 1997-06-04 49
-1959 1 1997-05-05 46
2151 3 1997-01-20 49
-2177 5 1997-05-10 46
-2369 2 1997-01-02 47
2469 3 1997-01-11 48
2469 6 1997-03-03 49
2470 2 1997-06-02 50
@@ -322,41 +300,23 @@ l_orderkey l_linenumber l_shipdate l_quantity
3429 1 1997-04-08 48
3490 2 1997-06-27 50
3619 1 1997-01-22 49
-3619 3 1997-01-31 46
-3969 3 1997-05-29 46
4005 4 1997-01-31 49
-4036 1 1997-06-21 46
4066 4 1997-02-17 49
-4098 1 1997-01-26 46
-422 3 1997-06-21 46
-4258 3 1997-01-02 46
-4421 2 1997-04-21 46
-4421 3 1997-05-25 46
4453 3 1997-05-29 48
4484 7 1997-03-17 50
-4609 3 1997-02-11 46
484 1 1997-03-06 49
484 3 1997-01-24 50
484 5 1997-03-05 48
485 1 1997-03-28 50
-4868 1 1997-04-29 47
4868 3 1997-04-23 49
4934 1 1997-05-20 48
4967 1 1997-05-27 50
-5090 2 1997-04-05 46
5152 2 1997-03-10 50
5158 4 1997-04-10 49
-5606 3 1997-03-11 46
-5606 7 1997-02-01 46
-5762 4 1997-03-02 47
581 3 1997-02-27 49
5829 5 1997-01-31 49
-5831 4 1997-02-24 46
-5895 2 1997-04-27 47
5895 3 1997-03-15 49
5952 1 1997-06-30 49
-705 1 1997-04-18 46
-836 3 1997-03-21 46
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
@@ -372,6 +332,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -386,7 +347,9 @@ EXPLAIN
"key": "i_l_shipdate",
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
+ "loops": 1,
"rows": 98,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'",
"using_index": true
@@ -401,7 +364,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 98,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 4.733333111,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
@@ -427,6 +392,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -443,9 +409,11 @@ ANALYZE
"key": "i_l_shipdate",
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 98,
"r_rows": 98,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -463,9 +431,12 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 98,
"r_loops": 98,
+ "r_table_loops": 96,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 4.733333111,
@@ -507,6 +478,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -521,7 +493,9 @@ EXPLAIN
"key": "i_l_shipdate",
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
+ "loops": 1,
"rows": 98,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'",
"using_index": true
@@ -536,7 +510,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 98,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 4.733333111,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
@@ -562,6 +538,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -578,9 +555,11 @@ ANALYZE
"key": "i_l_shipdate",
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 98,
"r_rows": 98,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -598,9 +577,12 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 98,
"r_loops": 98,
+ "r_table_loops": 96,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 4.733333111,
@@ -630,20 +612,21 @@ o_orderkey l_linenumber l_shipdate o_totalprice
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 510 (10%) Using index condition; Using where; Using rowid filter
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 349 (8%) Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -656,21 +639,23 @@ EXPLAIN
"i_l_orderkey_quantity",
"i_l_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 605,
- "selectivity_pct": 10.07493755
+ "rows": 510,
+ "selectivity_pct": 8.492922565
},
- "rows": 510,
- "filtered": 10.07493782,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "loops": 1,
+ "rows": 349,
+ "cost": "COST_REPLACED",
+ "filtered": 8.492922783,
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
},
{
@@ -682,7 +667,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 29.64029975,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 9.600000381,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
@@ -693,15 +680,15 @@ EXPLAIN
set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
-1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 510 (10%) 60.00 (11%) 10.07 100.00 Using index condition; Using where; Using rowid filter
-1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.60 26.67 Using where
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 349 (8%) 34.00 (9%) 8.49 100.00 Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.60 26.47 Using where
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
ANALYZE
{
@@ -710,6 +697,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -724,31 +712,33 @@ ANALYZE
"i_l_orderkey_quantity",
"i_l_quantity"
],
- "key": "i_l_shipdate",
- "key_length": "4",
- "used_key_parts": ["l_shipDATE"],
+ "key": "i_l_quantity",
+ "key_length": "9",
+ "used_key_parts": ["l_quantity"],
"rowid_filter": {
"range": {
- "key": "i_l_quantity",
- "used_key_parts": ["l_quantity"]
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
},
- "rows": 605,
- "selectivity_pct": 10.07493755,
- "r_rows": 605,
- "r_lookups": 510,
- "r_selectivity_pct": 11.76470588,
+ "rows": 510,
+ "selectivity_pct": 8.492922565,
+ "r_rows": 510,
+ "r_lookups": 349,
+ "r_selectivity_pct": 9.742120344,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
+ "loops": 1,
"r_loops": 1,
- "rows": 510,
- "r_rows": 60,
+ "rows": 349,
+ "r_rows": 34,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 10.07493782,
+ "filtered": 8.492922783,
"r_filtered": 100,
- "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
- "attached_condition": "lineitem.l_quantity > 45"
+ "index_condition": "lineitem.l_quantity > 47",
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
},
{
@@ -760,13 +750,15 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
- "r_loops": 60,
+ "loops": 29.64029975,
+ "r_loops": 34,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 9.600000381,
- "r_filtered": 26.66666667,
+ "r_filtered": 26.47058824,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
}
@@ -776,29 +768,22 @@ ANALYZE
set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
-1632 1 1997-01-25 47 183286.33
-1632 3 1997-01-29 47 183286.33
-2177 5 1997-05-10 46 183493.42
2469 3 1997-01-11 48 192074.23
2469 6 1997-03-03 49 192074.23
3619 1 1997-01-22 49 222274.54
-3619 3 1997-01-31 46 222274.54
484 1 1997-03-06 49 219920.62
484 3 1997-01-24 50 219920.62
484 5 1997-03-05 48 219920.62
4934 1 1997-05-20 48 180478.16
-5606 3 1997-03-11 46 219959.08
-5606 7 1997-02-01 46 219959.08
5829 5 1997-01-31 49 183734.56
-5895 2 1997-04-27 47 201419.83
5895 3 1997-03-15 49 201419.83
set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 144 Using where; Using index
@@ -806,12 +791,13 @@ id select_type table type possible_keys key key_len ref rows Extra
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -821,7 +807,9 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"rows": 144,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "orders.o_totalprice between 180000 and 230000",
"using_index": true
@@ -842,9 +830,11 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 144,
"rows": 4,
- "filtered": 0.855656624,
- "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 45"
+ "cost": "COST_REPLACED",
+ "filtered": 0.493593663,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 47"
}
}
]
@@ -853,15 +843,15 @@ EXPLAIN
set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 144 144.00 100.00 100.00 Using where; Using index
-1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.62 0.86 1.68 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.62 0.49 0.94 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
ANALYZE
{
@@ -870,6 +860,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -881,9 +872,11 @@ ANALYZE
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"r_loops": 1,
"rows": 144,
"r_rows": 144,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -907,14 +900,16 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 144,
"r_loops": 144,
"rows": 4,
"r_rows": 6.625,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
- "filtered": 0.855656624,
- "r_filtered": 1.677148847,
- "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 45"
+ "filtered": 0.493593663,
+ "r_filtered": 0.943396226,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 47"
}
}
]
@@ -923,25 +918,26 @@ ANALYZE
set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
-l_quantity > 45 AND
+l_quantity > 47 AND
o_totalprice between 180000 and 230000;
o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
-1632 1 1997-01-25 47 183286.33
-1632 3 1997-01-29 47 183286.33
-2177 5 1997-05-10 46 183493.42
2469 3 1997-01-11 48 192074.23
2469 6 1997-03-03 49 192074.23
3619 1 1997-01-22 49 222274.54
-3619 3 1997-01-31 46 222274.54
484 1 1997-03-06 49 219920.62
484 3 1997-01-24 50 219920.62
484 5 1997-03-05 48 219920.62
4934 1 1997-05-20 48 180478.16
-5606 3 1997-03-11 46 219959.08
-5606 7 1997-02-01 46 219959.08
5829 5 1997-01-31 49 183734.56
-5895 2 1997-04-27 47 201419.83
5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT STRAIGHT_JOIN o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM lineitem JOIN orders ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 47 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_quantity|i_l_shipdate 9|4 NULL 349 (8%) Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
@@ -957,6 +953,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -966,7 +963,9 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"rows": 71,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "orders.o_totalprice between 200000 and 230000",
"using_index": true
@@ -986,7 +985,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 71,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 8.492922783,
"attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
@@ -1012,6 +1013,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1023,9 +1025,11 @@ ANALYZE
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"r_loops": 1,
"rows": 71,
"r_rows": 71,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1048,9 +1052,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 71,
"r_loops": 71,
"rows": 4,
"r_rows": 6.704225352,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 8.492922783,
@@ -1118,6 +1124,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1127,7 +1134,9 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"rows": 71,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "orders.o_totalprice between 200000 and 230000",
"using_index": true
@@ -1147,7 +1156,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 71,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 8.492922783,
"attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
}
@@ -1173,6 +1184,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1184,9 +1196,11 @@ ANALYZE
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
+ "loops": 1,
"r_loops": 1,
"rows": 71,
"r_rows": 71,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -1209,9 +1223,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 71,
"r_loops": 71,
"rows": 4,
"r_rows": 6.704225352,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 8.492922783,
@@ -1290,6 +1306,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1305,7 +1322,9 @@ EXPLAIN
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 0.566194832,
"index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
@@ -1320,7 +1339,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 5.666666508,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
@@ -1350,6 +1371,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1367,9 +1389,11 @@ ANALYZE
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 18,
"r_rows": 18,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 0.566194832,
@@ -1387,9 +1411,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"r_loops": 7,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 5.666666508,
@@ -1427,6 +1453,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1442,7 +1469,9 @@ EXPLAIN
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 0.566194832,
"index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
@@ -1457,7 +1486,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 5.666666508,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
@@ -1487,6 +1518,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1504,9 +1536,11 @@ ANALYZE
"key": "i_l_receiptdate",
"key_length": "4",
"used_key_parts": ["l_receiptDATE"],
+ "loops": 1,
"r_loops": 1,
"rows": 18,
"r_rows": 18,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 0.566194832,
@@ -1524,9 +1558,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "loops": 1,
"r_loops": 7,
"rows": 1,
"r_rows": 1,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 5.666666508,
@@ -1571,6 +1607,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1580,7 +1617,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 41,
+ "cost": "COST_REPLACED",
"filtered": 3.333333254,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
@@ -1600,7 +1639,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.366666667,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 3.047460556,
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -1630,6 +1671,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1641,9 +1683,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 41,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.333333254,
@@ -1666,9 +1710,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.366666667,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.047460556,
@@ -1709,6 +1755,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -1718,7 +1765,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 41,
+ "cost": "COST_REPLACED",
"filtered": 3.333333254,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
@@ -1738,7 +1787,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.366666667,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 3.047460556,
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -1768,6 +1819,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1779,9 +1831,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 41,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.333333254,
@@ -1804,9 +1858,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1.366666667,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 3.047460556,
@@ -1850,6 +1906,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
@@ -1864,7 +1921,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 41,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
@@ -1884,7 +1943,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"rows": 4,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -1914,6 +1975,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -1930,9 +1992,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 41,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -1955,9 +2019,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -1998,6 +2064,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
@@ -2012,7 +2079,9 @@ EXPLAIN
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"rows": 41,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
@@ -2032,7 +2101,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"rows": 4,
+ "cost": "REPLACED",
"filtered": "REPLACED",
"attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
}
@@ -2062,6 +2133,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -2078,9 +2150,11 @@ ANALYZE
"key": "i_o_totaldiscount",
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
+ "loops": 1,
"r_loops": 1,
"rows": 41,
"r_rows": 41,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -2103,9 +2177,11 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["l_orderkey"],
"ref": ["dbt3_s001.orders.o_orderkey"],
+ "loops": 1,
"r_loops": 1,
"rows": 4,
"r_rows": 6,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": "REPLACED",
@@ -2133,703 +2209,6 @@ ALTER TABLE orders DROP COLUMN o_totaldiscount;
DROP VIEW v1;
DROP DATABASE dbt3_s001;
use test;
-#
-# MDEV-18816: potential range filter for one join table with
-# impossible WHERE for another
-#
-create table t1 (
-pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
-) engine=myisam;
-insert into t1 values (1,'a',-5),(2,'a',null);
-create table t2 (
-pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
-) engine=myisam;
-insert into t2 values
-(1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
-(7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
-(13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
-select 1
-from t1
-left join
-t2 join t1 as t1_a on t2.i1 = t1_a.pk
-on t1.c2 = t2.c1
-where t1_a.pk is null and t1_a.i1 != 3;
-1
-explain extended select 1
-from t1
-left join
-t2 join t1 as t1_a on t2.i1 = t1_a.pk
-on t1.c2 = t2.c1
-where t1_a.pk is null and t1_a.i1 != 3;
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
-Warnings:
-Note 1003 select 1 AS `1` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t1_a` where 0
-drop table t1,t2;
-#
-# MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
-# move depends on uninitialized value
-#
-CREATE TABLE t1 (
-pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
-SELECT * FROM t1 WHERE pk < 5;
-pk i
-1 10
-2 20
-DROP TABLE t1;
-#
-# MDEV-18956: Possible rowid filter for subquery for which
-# in_to_exists strategy has been chosen
-#
-CREATE TABLE t1 (pk int) engine=myisam ;
-INSERT INTO t1 VALUES (1),(2);
-CREATE TABLE t2 (
-pk int auto_increment PRIMARY KEY,
-i1 int, i2 int, c2 varchar(1),
-KEY (i1), KEY (i2)
-) engine=myisam;
-INSERT INTO t2 VALUES
-(1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
-(6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
-(11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
-SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
-pk
-EXPLAIN EXTENDED
-SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING
-2 SUBQUERY t2 ref i1,i2 i1 5 const 1 100.00 Using index condition; Using where
-Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0
-DROP TABLE t1,t2;
-#
-# MDEV-19255: rowid range filter built for range condition
-# that uses in expensive subquery
-#
-CREATE TABLE t1 (
-pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(a1), KEY(b1)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES
-(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
-(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
-(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
-(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
-(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
-(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
-(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
-(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
-(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
-(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
-(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
-(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
-(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
-(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
-(107,8,'z'),(108,3,'k'),(109,65,NULL);
-CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
-INSERT INTO t2 VALUES (1,1,'i');
-INSERT INTO t2 SELECT * FROM t1;
-INSERT INTO t1 SELECT pk1+200, a1, b1 FROM t1;
-INSERT INTO t1 SELECT pk1+400, a1, b1 FROM t1;
-ANALYZE TABLE t1,t2 PERSISTENT FOR ALL;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
-test.t2 analyze status Engine-independent statistics collected
-test.t2 analyze status OK
-SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
-WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-pk1 a1 b1 pk2 a2 b2
-17 1 f 16 1 j
-37 3 g 36 3 a
-105 8 i 104 8 e
-EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
-WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where
-1 PRIMARY t1 ref a1,b1 a1 5 test.t2.a2 36 28.75 Using where
-2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition
-Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t1`.`pk1` + 1 = `test`.`t2`.`pk2` + 2
-EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1+1 = pk2+2 AND a1 = a2 )
-WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
-EXPLAIN
-{
- "query_block": {
- "select_id": 1,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "rows": 101,
- "filtered": 100,
- "attached_condition": "t2.a2 is not null"
- }
- },
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["a1", "b1"],
- "key": "a1",
- "key_length": "5",
- "used_key_parts": ["a1"],
- "ref": ["test.t2.a2"],
- "rows": 36,
- "filtered": 28.75,
- "attached_condition": "t1.b1 <= (subquery#2) and t1.pk1 + 1 = t2.pk2 + 2"
- }
- }
- ],
- "subqueries": [
- {
- "query_block": {
- "select_id": 2,
- "nested_loop": [
- {
- "table": {
- "table_name": "t2",
- "access_type": "range",
- "possible_keys": ["PRIMARY"],
- "key": "PRIMARY",
- "key_length": "4",
- "used_key_parts": ["pk2"],
- "rows": 1,
- "filtered": 100,
- "index_condition": "t2.pk2 <= 1"
- }
- }
- ]
- }
- }
- ]
- }
-}
-DROP TABLE t1,t2;
-#
-# MDEV-21794: Optimizer flag rowid_filter leads to long query
-#
-create table t10(a int);
-insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
-create table t11(a int);
-insert into t11 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
-CREATE TABLE t1 (
-el_id int(10) unsigned NOT NULL ,
-el_index blob NOT NULL,
-el_index_60 varbinary(60) NOT NULL,
-filler blob,
-PRIMARY KEY (el_id),
-KEY el_index (el_index(60)),
-KEY el_index_60 (el_index_60,el_id)
-);
-insert into t1
-select
-A.a+1000*B.a,
-A.a+1000*B.a + 10000,
-A.a+1000*B.a + 10000,
-'filler-data-filler-data'
-from
-t11 A, t10 B;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze Warning Engine-independent statistics are not collected for column 'el_index'
-test.t1 analyze Warning Engine-independent statistics are not collected for column 'filler'
-test.t1 analyze status OK
-# This must not use rowid_filter with key=el_index|el_index_60:
-explain
-select * from t1
-where el_index like '10%' and (el_index_60 like '10%' or el_index_60 like '20%');
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range el_index,el_index_60 el_index 62 NULL 1000 Using where
-drop table t10, t11, t1;
-#
-# MDEV-22160: SIGSEGV in st_join_table::save_explain_data on SELECT
-#
-set @save_optimizer_switch= @@optimizer_switch;
-SET @@optimizer_switch="index_merge_sort_union=OFF";
-CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b));
-INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4);
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-INSERT INTO t1 SELECT * FROM t1;
-ANALYZE table t1 PERSISTENT FOR ALL;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status OK
-explain
-SELECT * FROM t1 WHERE a > 0 AND b=0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range|filter a,b a|b 5|5 NULL 64 (29%) Using index condition; Using where; Using rowid filter
-SELECT * FROM t1 WHERE a > 0 AND b=0;
-a b
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-1 0
-drop table t1;
-SET @@optimizer_switch=@save_optimizer_switch;
-#
-# MDEV-28846: Poor performance when rowid filter contains no elements
-#
-create table t1 (
-pk int primary key auto_increment,
-nm varchar(32),
-fl1 tinyint default 0,
-fl2 tinyint default 0,
-index idx1(nm, fl1),
-index idx2(fl2)
-) engine=myisam;
-create table name (
-pk int primary key auto_increment,
-nm bigint
-) engine=myisam;
-create table flag2 (
-pk int primary key auto_increment,
-fl2 tinyint
-) engine=myisam;
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select '500%' as a;
-a
-500%
-set optimizer_switch='rowid_filter=on';
-explain
-select * from t1 where nm like '500%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
-analyze format=json
-select * from t1 where nm like '500%' AND fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "range",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx1",
- "key_length": "35",
- "used_key_parts": ["nm"],
- "r_loops": 1,
- "rows": 1,
- "r_rows": 1,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 49.20000076,
- "r_filtered": 100,
- "index_condition": "t1.nm like '500%'",
- "attached_condition": "t1.fl2 = 0"
- }
- }
- ]
- }
-}
-select * from t1 where nm like '500%' AND fl2 = 0;
-pk nm fl1 fl2
-517 500 0 0
-truncate table name;
-truncate table flag2;
-truncate table t1;
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-set optimizer_switch='rowid_filter=off';
-explain
-select * from t1 where nm like '500%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
-analyze format=json
-select * from t1 where nm like '500%' AND fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "range",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx1",
- "key_length": "35",
- "used_key_parts": ["nm"],
- "r_loops": 1,
- "rows": 1,
- "r_rows": 1,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 49.20000076,
- "r_filtered": 100,
- "index_condition": "t1.nm like '500%'",
- "attached_condition": "t1.fl2 = 0"
- }
- }
- ]
- }
-}
-select * from t1 where nm like '500%' AND fl2 = 0;
-pk nm fl1 fl2
-517 500 0 0
-truncate table name;
-truncate table flag2;
-truncate table t1;
-insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
-insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select '607%' as a;
-a
-607%
-set optimizer_switch='rowid_filter=on';
-explain
-select * from t1 where nm like '607%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
-select * from t1 where nm like '607%' AND fl2 = 0;
-pk nm fl1 fl2
-721 607 0 0
-truncate table name;
-truncate table flag2;
-truncate table t1;
-insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
-insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select '75%' as a;
-a
-75%
-set optimizer_switch='rowid_filter=on';
-explain
-select * from t1 where nm like '75%' AND fl2 = 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter
-analyze format=json
-select * from t1 where nm like '75%' AND fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx2",
- "key_length": "2",
- "used_key_parts": ["fl2"],
- "ref": ["const"],
- "rowid_filter": {
- "range": {
- "key": "idx1",
- "used_key_parts": ["nm"]
- },
- "rows": 115,
- "selectivity_pct": 1.15,
- "r_rows": 111,
- "r_lookups": 100,
- "r_selectivity_pct": 2,
- "r_buffer_size": "REPLACED",
- "r_filling_time_ms": "REPLACED"
- },
- "r_loops": 1,
- "rows": 55,
- "r_rows": 2,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 1.149999976,
- "r_filtered": 100,
- "attached_condition": "t1.nm like '75%'"
- }
- }
- ]
- }
-}
-select * from t1 where nm like '75%' AND fl2 = 0;
-pk nm fl1 fl2
-4543 7503 0 0
-7373 7518 0 0
-drop table name, flag2;
-drop table t1;
-create table t1 (
-pk int primary key auto_increment,
-nm char(255),
-fl1 tinyint default 0,
-fl2 int default 0,
-index idx1(nm, fl1),
-index idx2(fl2)
-) engine=myisam;
-create table name (
-pk int primary key auto_increment,
-nm bigint
-) engine=myisam;
-create table flag2 (
-pk int primary key auto_increment,
-fl2 int
-) engine=myisam;
-insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
-insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19);
-insert into t1(nm,fl2)
-select nm, fl2 from name, flag2 where name.pk = flag2.pk;
-analyze table t1 persistent for all;
-Table Op Msg_type Msg_text
-test.t1 analyze status Engine-independent statistics collected
-test.t1 analyze status Table is already up to date
-select * from t1
-where
-(
-nm like '3400%' or nm like '3402%' or nm like '3403%' or
-nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
-nm like '3409%' or
-nm like '3411%' or nm like '3412%' or nm like '3413%' or
-nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
-nm like '3418%' or nm like '3419%' or
-nm like '3421%' or nm like '3422%' or nm like '3423%' or
-nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
-nm like '3428%' or nm like '3429%' or
-nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
-nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
-nm like '3439%' or
-nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
-nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
-nm like '3448%'
-) and fl2 = 0;
-pk nm fl1 fl2
-analyze format=json select * from t1
-where
-(
-nm like '3400%' or nm like '3402%' or nm like '3403%' or
-nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
-nm like '3409%' or
-nm like '3411%' or nm like '3412%' or nm like '3413%' or
-nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
-nm like '3418%' or nm like '3419%' or
-nm like '3421%' or nm like '3422%' or nm like '3423%' or
-nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
-nm like '3428%' or nm like '3429%' or
-nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
-nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
-nm like '3439%' or
-nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
-nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
-nm like '3448%'
-) and fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx2",
- "key_length": "5",
- "used_key_parts": ["fl2"],
- "ref": ["const"],
- "rowid_filter": {
- "range": {
- "key": "idx1",
- "used_key_parts": ["nm"]
- },
- "rows": 44,
- "selectivity_pct": 0.44,
- "r_rows": 44,
- "r_lookups": 1000,
- "r_selectivity_pct": 0,
- "r_buffer_size": "REPLACED",
- "r_filling_time_ms": "REPLACED"
- },
- "r_loops": 1,
- "rows": 863,
- "r_rows": 0,
- "r_table_time_ms": "REPLACED",
- "r_other_time_ms": "REPLACED",
- "filtered": 0.439999998,
- "r_filtered": 100,
- "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'"
- }
- }
- ]
- }
-}
-create table t0 select * from t1 where nm like '34%';
-delete from t1 using t1,t0 where t1.nm=t0.nm;
-analyze format=json select * from t1
-where
-(
-nm like '3400%' or nm like '3402%' or nm like '3403%' or
-nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
-nm like '3409%' or
-nm like '3411%' or nm like '3412%' or nm like '3413%' or
-nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
-nm like '3418%' or nm like '3419%' or
-nm like '3421%' or nm like '3422%' or nm like '3423%' or
-nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
-nm like '3428%' or nm like '3429%' or
-nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
-nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
-nm like '3439%' or
-nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
-nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
-nm like '3448%'
-) and fl2 = 0;
-ANALYZE
-{
- "query_optimization": {
- "r_total_time_ms": "REPLACED"
- },
- "query_block": {
- "select_id": 1,
- "r_loops": 1,
- "r_total_time_ms": "REPLACED",
- "nested_loop": [
- {
- "table": {
- "table_name": "t1",
- "access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx2",
- "key_length": "5",
- "used_key_parts": ["fl2"],
- "ref": ["const"],
- "rowid_filter": {
- "range": {
- "key": "idx1",
- "used_key_parts": ["nm"]
- },
- "rows": 44,
- "selectivity_pct": 0.44,
- "r_rows": 0,
- "r_lookups": 0,
- "r_selectivity_pct": 0,
- "r_buffer_size": "REPLACED",
- "r_filling_time_ms": "REPLACED"
- },
- "r_loops": 1,
- "rows": 853,
- "r_rows": 0,
- "filtered": 0.439999998,
- "r_filtered": 100,
- "attached_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'"
- }
- }
- ]
- }
-}
-drop table t0;
-set optimizer_switch='rowid_filter=default';
-drop table name, flag2;
-drop table t1;
-set @@use_stat_tables=@save_use_stat_tables;
SET GLOBAL innodb_stats_persistent=@save_stats_persistent;
#
# MDEV-18755: possible RORI-plan and possible plan with range filter
@@ -2854,6 +2233,7 @@ insert into t1 values
(81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null),
(85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160),
(89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null);
+insert into t1 values (100,null,null,null);
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
@@ -2870,8 +2250,8 @@ union
( select * from t1
where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')));
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ref|filter f1,f2 f1|f1 13|13 const 1 (2%) Using index condition; Using where; Using rowid filter
-2 UNION t1 ref|filter f1,f2 f1|f1 13|13 const 1 (2%) Using index condition; Using where; Using rowid filter
+1 PRIMARY t1 ref|filter f1,f2 f1|f1 13|13 const 2 (3%) Using index condition; Using where; Using rowid filter
+2 UNION t1 ref|filter f1,f2 f1|f1 13|13 const 2 (3%) Using index condition; Using where; Using rowid filter
NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
explain format=json ( select * from t1
where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')))
@@ -2888,6 +2268,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2903,11 +2284,13 @@ EXPLAIN
"key": "f1",
"used_key_parts": ["f1"]
},
- "rows": 1,
- "selectivity_pct": 1.587301587
+ "rows": 2,
+ "selectivity_pct": 3.125
},
- "rows": 1,
- "filtered": 1.587301612,
+ "loops": 1,
+ "rows": 2,
+ "cost": "COST_REPLACED",
+ "filtered": 3.125,
"index_condition": "t1.f1 is null",
"attached_condition": "t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')"
}
@@ -2919,6 +2302,7 @@ EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -2934,11 +2318,13 @@ EXPLAIN
"key": "f1",
"used_key_parts": ["f1"]
},
- "rows": 1,
- "selectivity_pct": 1.587301587
+ "rows": 2,
+ "selectivity_pct": 3.125
},
- "rows": 1,
- "filtered": 1.587301612,
+ "loops": 1,
+ "rows": 2,
+ "cost": "COST_REPLACED",
+ "filtered": 3.125,
"index_condition": "t1.f1 is null",
"attached_condition": "t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')"
}
@@ -2966,8 +2352,8 @@ id y x
1 2 1
explain extended select * from t1 join t2 on t1.id = t2.x where t2.y = 2 and t1.id = 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 100.00 Using index
-1 SIMPLE t2 index_merge x,y y,x 5,5 NULL 1 100.00 Using intersect(y,x); Using where; Using index
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const # #
+1 SIMPLE t2 index_merge x,y y,x 5,5 NULL # # Using intersect(y,x); Using where; Using index
Warnings:
Note 1003 select 1 AS `id`,`test`.`t2`.`y` AS `y`,`test`.`t2`.`x` AS `x` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`y` = 2 and `test`.`t2`.`x` = 1
drop table t1, t2;
@@ -3004,7 +2390,7 @@ count(*)
5
explain extended select count(*) from t1 where a between 21 and 30 and b=2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ref b,a b 5 const 24 9.60 Using where
+1 SIMPLE t1 ref|filter b,a b|a 5|5 const 24 (10%) 9.60 Using where; Using rowid filter
Warnings:
Note 1003 select count(0) AS `count(*)` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` between 21 and 30
select * from t1 where a between 21 and 30 and b=2;
@@ -3041,12 +2427,12 @@ Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN EXTENDED
-SELECT a FROM t1 WHERE c < 'k' AND b > 't' ORDER BY a;
+SELECT a FROM t1 WHERE c < 'e' AND b > 't' ORDER BY a;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range|filter b,c b|c 13|1027 NULL 5 (42%) 41.67 Using index condition; Using where; Using filesort; Using rowid filter
+1 SIMPLE t1 range|filter b,c b|c 13|1027 NULL 5 (21%) 20.83 Using index condition; Using where; Using filesort; Using rowid filter
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`c` < 'k' and `test`.`t1`.`b` > 't' order by `test`.`t1`.`a`
-SELECT a FROM t1 WHERE c < 'k' AND b > 't' ORDER BY a;
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`c` < 'e' and `test`.`t1`.`b` > 't' order by `test`.`t1`.`a`
+SELECT a FROM t1 WHERE c < 'e' AND b > 't' ORDER BY a;
a
1
5
@@ -3513,7 +2899,7 @@ SELECT * FROM t1
WHERE (a BETWEEN 9 AND 10 OR a IS NULL) AND (b BETWEEN 9 AND 10 OR b = 9)
ORDER BY pk LIMIT 1;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 index a,b PRIMARY 4 NULL 75 54.55 Using where
+1 SIMPLE t1 index a,b PRIMARY 4 NULL 73 56.05 Using where
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a` between 9 and 10 or `test`.`t1`.`a` is null) and (`test`.`t1`.`b` between 9 and 10 or `test`.`t1`.`b` = 9) order by `test`.`t1`.`pk` limit 1
ANALYZE
@@ -3715,7 +3101,7 @@ fi.fh in (6311439873746261694,-397087483897438286,
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index
1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where
-1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where
+1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using rowid filter
Warnings:
Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)
set statement optimizer_switch='rowid_filter=on' for select t.id, fi.*
@@ -3831,7 +3217,7 @@ fi.fh in (6311439873746261694,-397087483897438286,
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t index_merge PRIMARY,acli_rid,acli_tp acli_tp,acli_rid 2,767 NULL 2 100.00 Using intersect(acli_tp,acli_rid); Using where; Using index
1 SIMPLE a ref PRIMARY,acei_aclid acei_aclid 8 test.t.id 1 100.00 Using where; Using join buffer (flat, BKA join); Rowid-ordered scan
-1 SIMPLE fi ref filt_aceid,filt_fh filt_aceid 8 test.a.id 24 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan
+1 SIMPLE fi ref|filter filt_aceid,filt_fh filt_aceid|filt_fh 8|8 test.a.id 24 (14%) 14.46 Using where; Using join buffer (incremental, BKA join); Rowid-ordered scan; Using rowid filter
Warnings:
Note 1003 select `test`.`t`.`id` AS `id`,`test`.`fi`.`id` AS `id`,`test`.`fi`.`aceid` AS `aceid`,`test`.`fi`.`clid` AS `clid`,`test`.`fi`.`fh` AS `fh` from `test`.`acli` `t` join `test`.`acei` `a` join `test`.`filt` `fi` where `test`.`t`.`tp` = 121 and `test`.`a`.`atp` = 1 and `test`.`fi`.`aceid` = `test`.`a`.`id` and `test`.`a`.`aclid` = `test`.`t`.`id` and `test`.`t`.`rid` = 'B5FCC8C7111E4E3CBC21AAF5012F59C2' and `test`.`fi`.`fh` in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)
set statement optimizer_switch='rowid_filter=on' for select t.id, fi.*
@@ -3892,6 +3278,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -3917,9 +3304,11 @@ ANALYZE
}
]
},
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3938,9 +3327,11 @@ ANALYZE
"key_length": "8",
"used_key_parts": ["aclid"],
"ref": ["test.t.id"],
+ "loops": 2,
"r_loops": 1,
"rows": 1,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3951,7 +3342,8 @@ ANALYZE
"join_type": "BKA",
"mrr_type": "Rowid-ordered scan",
"attached_condition": "a.atp = 1",
- "r_filtered": 100
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
},
{
@@ -3964,9 +3356,24 @@ ANALYZE
"key_length": "8",
"used_key_parts": ["aceid"],
"ref": ["test.a.id"],
+ "rowid_filter": {
+ "range": {
+ "key": "filt_fh",
+ "used_key_parts": ["fh"]
+ },
+ "rows": 81,
+ "selectivity_pct": 14.46428571,
+ "r_rows": 80,
+ "r_lookups": 80,
+ "r_selectivity_pct": 40,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "loops": 3.5384,
"r_loops": 1,
"rows": 24,
- "r_rows": 80,
+ "r_rows": 32,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 14.46428585,
@@ -3977,7 +3384,8 @@ ANALYZE
"join_type": "BKA",
"mrr_type": "Rowid-ordered scan",
"attached_condition": "fi.fh in (6311439873746261694,-397087483897438286,8518228073041491534,-5420422472375069774)",
- "r_filtered": 40
+ "r_filtered": 100,
+ "r_unpack_time_ms": "REPLACED"
}
}
]
@@ -4016,6 +3424,16 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+EXPLAIN EXTENDED SELECT * FROM t1
+WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1
+WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 60 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ref|filter c1,i1 c1|i1 3|5 func 38 (25%) 25.00 Using where; Full scan on NULL key; Using rowid filter
+2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 60 100.00 Using join buffer (flat, BNL join)
+Warnings:
+Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and trigcond(<cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c1`))))
SELECT * FROM t1
WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1
WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5);
@@ -4080,16 +3498,6 @@ pk c1
128 y
129 NULL
133 NULL
-EXPLAIN EXTENDED SELECT * FROM t1
-WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1
-WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 60 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ref c1,i1 i1 5 test.t1.pk 20 100.00 Using index condition; Using where
-2 DEPENDENT SUBQUERY a1 ALL NULL NULL NULL NULL 60 100.00 Using join buffer (flat, BNL join)
-Warnings:
-Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1` from `test`.`t1` where !<expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t2`.`c1` from `test`.`t2` join `test`.`t1` `a1` where `test`.`t2`.`i1` = `test`.`t1`.`pk` and `test`.`t2`.`i1` between 3 and 5 and trigcond(<cache>(`test`.`t1`.`c1`) = `test`.`t2`.`c1`))))
DROP TABLE t1,t2;
set global innodb_stats_persistent= @stats.save;
# End of 10.4 tests
diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test
index 8705a0c9a12..77f2e7c1b59 100644
--- a/mysql-test/main/rowid_filter_innodb.test
+++ b/mysql-test/main/rowid_filter_innodb.test
@@ -37,6 +37,7 @@ insert into t1 values
(81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null),
(85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160),
(89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null);
+insert into t1 values (100,null,null,null);
analyze table t1;
@@ -49,6 +50,7 @@ let $q=
eval $q;
eval explain $q;
+--source include/explain-no-costs.inc
eval explain format=json $q;
drop table t1;
@@ -70,6 +72,7 @@ let $q=
select * from t1 join t2 on t1.id = t2.x where t2.y = 2 and t1.id = 1;
eval $q;
+--replace_column 9 # 10 #
eval explain extended $q;
drop table t1, t2;
@@ -132,9 +135,9 @@ INSERT INTO t1 VALUES
ANALYZE TABLE t1;
EXPLAIN EXTENDED
-SELECT a FROM t1 WHERE c < 'k' AND b > 't' ORDER BY a;
+SELECT a FROM t1 WHERE c < 'e' AND b > 't' ORDER BY a;
-SELECT a FROM t1 WHERE c < 'k' AND b > 't' ORDER BY a;
+SELECT a FROM t1 WHERE c < 'e' AND b > 't' ORDER BY a;
DROP TABLE t1;
SET GLOBAL innodb_stats_persistent= @stats.save;
@@ -643,8 +646,8 @@ SELECT * FROM t1
WHERE t1.c1 NOT IN (SELECT t2.c1 FROM t2, t1 AS a1
WHERE t2.i1 = t1.pk AND t2.i1 BETWEEN 3 AND 5);
-eval $q;
eval EXPLAIN EXTENDED $q;
+eval $q;
DROP TABLE t1,t2;
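# -- Editorial note, not part of the patch ---------------------------------
# Two result-stabilising conventions recur in these test changes:
# "--replace_column 9 # 10 #" masks the rows/filtered columns of EXPLAIN
# EXTENDED (they are recorded as "#" in the .result), and
# "--source include/explain-no-costs.inc" is sourced right before EXPLAIN
# FORMAT=JSON so that the newly added "cost" members are recorded as
# "COST_REPLACED"; ANALYZE FORMAT=JSON uses include/analyze-format.inc and
# "REPLACED" in the same way.  The pattern, as used in the hunks above:
--replace_column 9 # 10 #
eval explain extended $q;
--source include/explain-no-costs.inc
eval explain format=json $q;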
diff --git a/mysql-test/main/rowid_filter_innodb_debug.result b/mysql-test/main/rowid_filter_innodb_debug.result
index f82b29aa1e6..7e5446c9025 100644
--- a/mysql-test/main/rowid_filter_innodb_debug.result
+++ b/mysql-test/main/rowid_filter_innodb_debug.result
@@ -3,7 +3,7 @@ set default_storage_engine=innodb;
# MDEV-22761 KILL QUERY during rowid_filter, crashes
#
create table t2(a int);
-insert into t2 select * from seq_0_to_99;
+insert into t2 select seq from seq_1_to_100;
CREATE TABLE t3 (
key1 int ,
key2 int,
@@ -11,16 +11,11 @@ filler varchar(255),
KEY (key1),
KEY (key2)
);
+insert into t3 select seq,seq, 'filler-data-filler-data' from seq_1_to_2000;
select engine from information_schema.tables
where table_schema=database() and table_name='t3';
engine
InnoDB
-insert into t3
-select
-A.seq,
-B.seq,
-'filler-data-filler-data'
-from seq_0_to_99 A, seq_0_to_99 B;
analyze table t2,t3;
Table Op Msg_type Msg_text
test.t2 analyze status Engine-independent statistics collected
@@ -28,16 +23,16 @@ test.t2 analyze status OK
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
explain
-select * from t2, t3
+select straight_join * from t2, t3
where
-t3.key1=t2.a and t3.key2 in (2,3);
+t3.key1=t2.a and t3.key2 between 2 and 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where
-1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter
+1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1 (0%) Using where; Using rowid filter
set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go';
-select * from t2, t3
+select straight_join * from t2, t3
where
-t3.key1=t2.a and t3.key2 in (2,3);
+t3.key1=t2.a and t3.key2 between 2 and 10;
connect con1, localhost, root,,;
set debug_sync='now WAIT_FOR at_rowid_filter_check';
kill query $target_id;
diff --git a/mysql-test/main/rowid_filter_innodb_debug.test b/mysql-test/main/rowid_filter_innodb_debug.test
index 60381658eaf..99d32f9616d 100644
--- a/mysql-test/main/rowid_filter_innodb_debug.test
+++ b/mysql-test/main/rowid_filter_innodb_debug.test
@@ -1,7 +1,9 @@
--source include/have_innodb.inc
+--source include/have_sequence.inc
--source include/no_valgrind_without_big.inc
set default_storage_engine=innodb;
--source include/rowid_filter_debug_kill.inc
set default_storage_engine=default;
+
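# -- Editorial note, not part of the patch ---------------------------------
# The debug-kill test above (and its MyISAM twin further down) was reworked
# so that both tables are filled from the sequence engine, hence the new
# have_sequence.inc dependency, and the probed query uses STRAIGHT_JOIN with
# a wider BETWEEN range: t3 is then guaranteed to be the second table, its
# rowid filter over key2 gets built, and the session blocks on the
# handler_rowid_filter_check sync point while checking it, which is what the
# concurrent KILL QUERY needs to hit.  Condensed from the recorded results:
insert into t2 select seq from seq_1_to_100;
insert into t3 select seq,seq, 'filler-data-filler-data' from seq_1_to_2000;
analyze table t2,t3;
explain
select straight_join * from t2, t3
where
t3.key1=t2.a and t3.key2 between 2 and 10;
# expected: t3 is accessed as ref|filter on key1|key2 with
# "Using rowid filter" in Extra, giving the KILL something to interrupt.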
diff --git a/mysql-test/main/rowid_filter_myisam.result b/mysql-test/main/rowid_filter_myisam.result
index 927257d2cc7..7f05b6167fd 100644
--- a/mysql-test/main/rowid_filter_myisam.result
+++ b/mysql-test/main/rowid_filter_myisam.result
@@ -1,3 +1,4 @@
+SET optimizer_switch='rowid_filter=on';
#
# MDEV-22553: Assertion `info->lastpos == (~ (my_off_t) 0)' failed in mi_rkey with rowid_filer=on
#
@@ -19,3 +20,664 @@ ALTER TABLE t1 ENABLE KEYS;
SELECT * FROM t1 WHERE ( a BETWEEN 255 AND 270 OR f = 200 ) AND f IN ( 1, 4, 112, 143 ) AND d IN ('Montana', 'South Dakota');
a b c d e f
DROP TABLE t1;
+use test;
+#
+# MDEV-18816: potential range filter for one join table with
+# impossible WHERE for another
+#
+create table t1 (
+pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
+) engine=myisam;
+insert into t1 values (1,'a',-5),(2,'a',null);
+create table t2 (
+pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
+) engine=myisam;
+insert into t2 values
+(1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
+(7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
+(13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
+select 1
+from t1
+left join
+t2 join t1 as t1_a on t2.i1 = t1_a.pk
+on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+1
+explain extended select 1
+from t1
+left join
+t2 join t1 as t1_a on t2.i1 = t1_a.pk
+on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 select 1 AS `1` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t1_a` where 0
+drop table t1,t2;
+#
+# MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
+# move depends on uninitialized value
+#
+CREATE TABLE t1 (
+pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
+SELECT * FROM t1 WHERE pk < 5;
+pk i
+1 10
+2 20
+DROP TABLE t1;
+#
+# MDEV-18956: Possible rowid filter for subquery for which
+# in_to_exists strategy has been chosen
+#
+CREATE TABLE t1 (pk int) engine=myisam ;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (
+pk int auto_increment PRIMARY KEY,
+i1 int, i2 int, c2 varchar(1),
+KEY (i1), KEY (i2)
+) engine=myisam;
+INSERT INTO t2 VALUES
+(1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
+(6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
+(11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+pk
+EXPLAIN EXTENDED
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0
+DROP TABLE t1,t2;
+#
+# MDEV-19255: rowid range filter built for range condition
+# that uses an expensive subquery
+#
+CREATE TABLE t1 (
+pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
+(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
+(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
+(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
+(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
+(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
+(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
+(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
+(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
+(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
+(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
+(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
+(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
+(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
+(107,8,'z'),(108,3,'k'),(109,65,NULL);
+CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,1,'x');
+INSERT INTO t2 SELECT * FROM t1;
+SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+pk1 a1 b1 pk2 a2 b2
+65 2 a 109 65 NULL
+EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where
+1 PRIMARY t1 eq_ref PRIMARY,b1 PRIMARY 4 test.t2.a2 1 87.00 Using where
+2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2`
+EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "loops": 1,
+ "rows": 101,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null"
+ }
+ },
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "b1"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["pk1"],
+ "ref": ["test.t2.a2"],
+ "loops": 101,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 87,
+ "attached_condition": "t1.b1 <= (subquery#2)"
+ }
+ }
+ ],
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "cost": "COST_REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t2",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["pk2"],
+ "loops": 1,
+ "rows": 1,
+ "cost": "COST_REPLACED",
+ "filtered": 100,
+ "index_condition": "t2.pk2 <= 1"
+ }
+ }
+ ]
+ }
+ }
+ ]
+ }
+}
+DROP TABLE t1,t2;
+#
+# MDEV-21794: Optimizer flag rowid_filter leads to long query
+#
+create table t10(a int);
+insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t11(a int);
+insert into t11 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
+CREATE TABLE t1 (
+el_id int(10) unsigned NOT NULL ,
+el_index blob NOT NULL,
+el_index_60 varbinary(60) NOT NULL,
+filler blob,
+PRIMARY KEY (el_id),
+KEY el_index (el_index(60)),
+KEY el_index_60 (el_index_60,el_id)
+);
+insert into t1
+select
+A.a+1000*B.a,
+A.a+1000*B.a + 10000,
+A.a+1000*B.a + 10000,
+'filler-data-filler-data'
+from
+t11 A, t10 B;
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'el_index'
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'filler'
+test.t1 analyze status Table is already up to date
+# This must not use rowid_filter with key=el_index|el_index_60:
+explain
+select * from t1
+where el_index like '10%' and (el_index_60 like '10%' or el_index_60 like '20%');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range el_index,el_index_60 el_index 62 NULL 645 Using where
+drop table t10, t11, t1;
+#
+# MDEV-22160: SIGSEGV in st_join_table::save_explain_data on SELECT
+#
+set @save_optimizer_switch= @@optimizer_switch;
+SET @@optimizer_switch="index_merge_sort_union=OFF";
+CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b));
+INSERT INTO t1 VALUES (0,0),(0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4),(3,3),(3,4),(3,5),(8,8),(8,9),(1,0),(2,0),(0,0),(0,0);
+explain
+SELECT * FROM t1 WHERE a > 0 AND b=0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref|filter a,b b|a 5|5 const 7 (47%) Using where; Using rowid filter
+SELECT * FROM t1 WHERE a > 0 AND b=0;
+a b
+1 0
+1 0
+2 0
+drop table t1;
+SET @@optimizer_switch=@save_optimizer_switch;
+#
+# MDEV-21633
+# Assertion `tmp >= 0' failed in best_access_path with rowid_filter=ON
+#
+set @save_optimizer_switch= @@optimizer_switch;
+SET optimizer_switch='rowid_filter=on';
+CREATE TABLE t1 (
+pk INT AUTO_INCREMENT,
+a INT,
+b VARCHAR(8),
+KEY(a),
+PRIMARY KEY(pk),
+KEY (a,pk)
+) ENGINE=MyISAM;
+INSERT INTO t1 (a,b) VALUES
+(NULL,'d'),(9,'b'),(2,'x'),(5,'k'),(NULL,'d'),(3,'s'),(5,'k'),(1,'r'),
+(8,'l'),(3,'z'),(1,'c'),(1,'q'),(NULL,'x'),(NULL,'p'),(NULL,'z'),(7,'a'),
+(0,'i'),(3,'s'),(NULL,'h'),(4,'p'),(1,'i'),(4,'f'),(1,'c'),(NULL,'a'),
+(NULL,'x'),(1,'b'),(NULL,'n'),(NULL,'h'),(5,'i'),(6,'e'),(NULL,'i'),
+(7,'e'),(1,'r'),(NULL,'z'),(1,'i'),(14,'c'),(6,'u'),(3,'b'),(4,'z'),
+(2,'c'),(70,'d'),(NULL,'p'),(21,'j'),(6,'e'),(5,'c'),(13,'i'),(42,'d'),
+(80,'s'),(14,'t'),(9,'a'),(0,'2'),(0,NULL),(0,NULL),(0,NULL),(0,''),
+(0,''),(0,'1'),(0,''),(0,''),(0,''),(0,''),(0,NULL),(0,''),(0,''),(0,''),
+(0,NULL),(0,''),(0,''),(0,''),(0,''),(0,''),(0,''),(0,NULL),(0,NULL),
+(0,NULL),(0,''),(0,''),(0,''),(0,''),(0,NULL),(0,''),(0,NULL),(0,NULL),
+(0,''),(0,''),(0,''),(0,NULL),(0,''),(0,NULL),(0,''),(0,''),(0,''),(0,''),
+(0,''),(0,''),(0,''),(0,NULL),(0,''),(0,NULL),(0,'');
+CREATE TABLE t2 (c INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5),(6);
+SELECT * FROM t1 JOIN t2 WHERE a = c AND pk BETWEEN 4 AND 7 AND a BETWEEN 2 AND 12 AND b != 'foo';
+pk a b c
+6 3 s 3
+4 5 k 5
+7 5 k 5
+explain SELECT * FROM t1 JOIN t2 WHERE a = c AND pk BETWEEN 4 AND 7 AND a BETWEEN 2 AND 12 AND b != 'foo';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range|filter PRIMARY,a,a_2 PRIMARY|a 4|5 NULL 4 (11%) Using index condition; Using where; Using rowid filter
+1 SIMPLE t2 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
+SET optimizer_switch='rowid_filter=off';
+SELECT * FROM t1 JOIN t2 WHERE a = c AND pk BETWEEN 4 AND 7 AND a BETWEEN 2 AND 12 AND b != 'foo';
+pk a b c
+6 3 s 3
+4 5 k 5
+7 5 k 5
+SET @@optimizer_switch=@save_optimizer_switch;
+DROP TABLE t1, t2;
+#
+# MDEV-28846: Poor performance when rowid filter contains no elements
+#
+create table t1 (
+pk int primary key auto_increment,
+nm varchar(32),
+fl1 tinyint default 0,
+fl2 tinyint default 0,
+index idx1(nm, fl1),
+index idx2(fl2)
+) engine=myisam;
+create table name (
+pk int primary key auto_increment,
+nm bigint
+) engine=myisam;
+create table flag2 (
+pk int primary key auto_increment,
+fl2 tinyint
+) engine=myisam;
+insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
+insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
+insert into t1(nm,fl2)
+select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select '500%' as a;
+a
+500%
+set optimizer_switch='rowid_filter=on';
+explain
+select * from t1 where nm like '500%' AND fl2 = 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
+analyze format=json
+select * from t1 where nm like '500%' AND fl2 = 0;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx1",
+ "key_length": "35",
+ "used_key_parts": ["nm"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 49.20000076,
+ "r_filtered": 100,
+ "index_condition": "t1.nm like '500%'",
+ "attached_condition": "t1.fl2 = 0"
+ }
+ }
+ ]
+ }
+}
+select * from t1 where nm like '500%' AND fl2 = 0;
+pk nm fl1 fl2
+517 500 0 0
+truncate table name;
+truncate table flag2;
+truncate table t1;
+insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
+insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
+insert into t1(nm,fl2)
+select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+set optimizer_switch='rowid_filter=off';
+explain
+select * from t1 where nm like '500%' AND fl2 = 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
+analyze format=json
+select * from t1 where nm like '500%' AND fl2 = 0;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx1",
+ "key_length": "35",
+ "used_key_parts": ["nm"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 1,
+ "r_rows": 1,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 49.20000076,
+ "r_filtered": 100,
+ "index_condition": "t1.nm like '500%'",
+ "attached_condition": "t1.fl2 = 0"
+ }
+ }
+ ]
+ }
+}
+select * from t1 where nm like '500%' AND fl2 = 0;
+pk nm fl1 fl2
+517 500 0 0
+truncate table name;
+truncate table flag2;
+truncate table t1;
+insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
+insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19);
+insert into t1(nm,fl2)
+select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select '607%' as a;
+a
+607%
+set optimizer_switch='rowid_filter=on';
+explain
+select * from t1 where nm like '607%' AND fl2 = 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range idx1,idx2 idx1 35 NULL 1 Using index condition; Using where
+select * from t1 where nm like '607%' AND fl2 = 0;
+pk nm fl1 fl2
+721 607 0 0
+truncate table name;
+truncate table flag2;
+truncate table t1;
+insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
+insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19);
+insert into t1(nm,fl2)
+select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select '75%' as a;
+a
+75%
+set optimizer_switch='rowid_filter=on';
+explain
+select * from t1 where nm like '75%' AND fl2 = 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 2|35 const 55 (1%) Using where; Using rowid filter
+analyze format=json
+select * from t1 where nm like '75%' AND fl2 = 0;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ref",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx2",
+ "key_length": "2",
+ "used_key_parts": ["fl2"],
+ "ref": ["const"],
+ "rowid_filter": {
+ "range": {
+ "key": "idx1",
+ "used_key_parts": ["nm"]
+ },
+ "rows": 115,
+ "selectivity_pct": 1.15,
+ "r_rows": 111,
+ "r_lookups": 100,
+ "r_selectivity_pct": 2,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 55,
+ "r_rows": 2,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 1.149999976,
+ "r_filtered": 100,
+ "attached_condition": "t1.nm like '75%'"
+ }
+ }
+ ]
+ }
+}
+select * from t1 where nm like '75%' AND fl2 = 0;
+pk nm fl1 fl2
+4543 7503 0 0
+7373 7518 0 0
+drop table name, flag2;
+drop table t1;
+create table t1 (
+pk int primary key auto_increment,
+nm char(255),
+fl1 tinyint default 0,
+fl2 int default 0,
+index idx1(nm, fl1),
+index idx2(fl2)
+) engine=myisam;
+create table name (
+pk int primary key auto_increment,
+nm bigint
+) engine=myisam;
+create table flag2 (
+pk int primary key auto_increment,
+fl2 int
+) engine=myisam;
+insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
+insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19);
+insert into t1(nm,fl2)
+select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select * from t1
+where
+(
+nm like '3400%' or nm like '3402%' or nm like '3403%' or
+nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
+nm like '3409%' or
+nm like '3411%' or nm like '3412%' or nm like '3413%' or
+nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
+nm like '3418%' or nm like '3419%' or
+nm like '3421%' or nm like '3422%' or nm like '3423%' or
+nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
+nm like '3428%' or nm like '3429%' or
+nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
+nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
+nm like '3439%' or
+nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
+nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
+nm like '3448%'
+) and fl2 = 0;
+pk nm fl1 fl2
+analyze format=json select * from t1
+where
+(
+nm like '3400%' or nm like '3402%' or nm like '3403%' or
+nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
+nm like '3409%' or
+nm like '3411%' or nm like '3412%' or nm like '3413%' or
+nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
+nm like '3418%' or nm like '3419%' or
+nm like '3421%' or nm like '3422%' or nm like '3423%' or
+nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
+nm like '3428%' or nm like '3429%' or
+nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
+nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
+nm like '3439%' or
+nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
+nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
+nm like '3448%'
+) and fl2 = 0;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx1",
+ "key_length": "256",
+ "used_key_parts": ["nm"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 44,
+ "r_rows": 44,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 8.630000114,
+ "r_filtered": 0,
+ "index_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'",
+ "attached_condition": "t1.fl2 = 0"
+ }
+ }
+ ]
+ }
+}
+create table t0 select * from t1 where nm like '34%';
+delete from t1 using t1,t0 where t1.nm=t0.nm;
+analyze format=json select * from t1
+where
+(
+nm like '3400%' or nm like '3402%' or nm like '3403%' or
+nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
+nm like '3409%' or
+nm like '3411%' or nm like '3412%' or nm like '3413%' or
+nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
+nm like '3418%' or nm like '3419%' or
+nm like '3421%' or nm like '3422%' or nm like '3423%' or
+nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
+nm like '3428%' or nm like '3429%' or
+nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
+nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
+nm like '3439%' or
+nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
+nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
+nm like '3448%'
+) and fl2 = 0;
+ANALYZE
+{
+ "query_optimization": {
+ "r_total_time_ms": "REPLACED"
+ },
+ "query_block": {
+ "select_id": 1,
+ "cost": "REPLACED",
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "nested_loop": [
+ {
+ "table": {
+ "table_name": "t1",
+ "access_type": "range",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx1",
+ "key_length": "256",
+ "used_key_parts": ["nm"],
+ "loops": 1,
+ "r_loops": 1,
+ "rows": 44,
+ "r_rows": 0,
+ "cost": "REPLACED",
+ "r_table_time_ms": "REPLACED",
+ "r_other_time_ms": "REPLACED",
+ "filtered": 8.529999733,
+ "r_filtered": 100,
+ "index_condition": "t1.nm like '3400%' or t1.nm like '3402%' or t1.nm like '3403%' or t1.nm like '3404%' or t1.nm like '3405%' or t1.nm like '3406%' or t1.nm like '3407%' or t1.nm like '3409%' or t1.nm like '3411%' or t1.nm like '3412%' or t1.nm like '3413%' or t1.nm like '3414%' or t1.nm like '3415%' or t1.nm like '3416%' or t1.nm like '3417%' or t1.nm like '3418%' or t1.nm like '3419%' or t1.nm like '3421%' or t1.nm like '3422%' or t1.nm like '3423%' or t1.nm like '3424%' or t1.nm like '3425%' or t1.nm like '3426%' or t1.nm like '3427%' or t1.nm like '3428%' or t1.nm like '3429%' or t1.nm like '3430%' or t1.nm like '3431%' or t1.nm like '3432%' or t1.nm like '3433%' or t1.nm like '3434%' or t1.nm like '3435%' or t1.nm like '3436%' or t1.nm like '3437%' or t1.nm like '3439%' or t1.nm like '3440%' or t1.nm like '3441%' or t1.nm like '3442%' or t1.nm like '3443%' or t1.nm like '3444%' or t1.nm like '3445%' or t1.nm like '3446%' or t1.nm like '3447%' or t1.nm like '3448%'",
+ "attached_condition": "t1.fl2 = 0"
+ }
+ }
+ ]
+ }
+}
+drop table t0;
+set optimizer_switch='rowid_filter=default';
+drop table name, flag2;
+drop table t1;
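# -- Editorial note, not part of the patch ---------------------------------
# Reading the rowid_filter object in the ANALYZE FORMAT=JSON output recorded
# above: "rows" and "selectivity_pct" are the optimizer's estimates for the
# filter, while the r_* members appear to be runtime counters -- "r_rows" is
# how many rowids the container was filled with, "r_lookups" how many times
# the container was probed during the ref scan, and "r_selectivity_pct" the
# share of probes that passed.  In the '75%' case above the filter built
# from idx1 holds 111 rowids, the ref scan on idx2 probes it 100 times and
# only 2 probes survive, matching the table's r_rows of 2.  The MDEV-28846
# scenario relies on the same counters: per the comment added to the test,
# when the container ends up empty no lookups are made into it at all.
# Such output is obtained with the masking include, e.g.:
--source include/analyze-format.inc
analyze format=json
select * from t1 where nm like '75%' AND fl2 = 0;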
diff --git a/mysql-test/main/rowid_filter_myisam.test b/mysql-test/main/rowid_filter_myisam.test
index 3ea4dc26ea0..e6eeed83bac 100644
--- a/mysql-test/main/rowid_filter_myisam.test
+++ b/mysql-test/main/rowid_filter_myisam.test
@@ -1,3 +1,5 @@
+SET optimizer_switch='rowid_filter=on';
+
--echo #
--echo # MDEV-22553: Assertion `info->lastpos == (~ (my_off_t) 0)' failed in mi_rkey with rowid_filer=on
--echo #
@@ -1623,3 +1625,406 @@ ALTER TABLE t1 ENABLE KEYS;
--echo # Must not crash:
SELECT * FROM t1 WHERE ( a BETWEEN 255 AND 270 OR f = 200 ) AND f IN ( 1, 4, 112, 143 ) AND d IN ('Montana', 'South Dakota');
DROP TABLE t1;
+
+
+
+use test;
+
+--echo #
+--echo # MDEV-18816: potential range filter for one join table with
+--echo # impossible WHERE for another
+--echo #
+
+create table t1 (
+ pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
+) engine=myisam;
+insert into t1 values (1,'a',-5),(2,'a',null);
+
+create table t2 (
+ pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
+) engine=myisam;
+insert into t2 values
+ (1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
+ (7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
+ (13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
+
+let $q=
+select 1
+ from t1
+ left join
+ t2 join t1 as t1_a on t2.i1 = t1_a.pk
+ on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+
+eval $q;
+eval explain extended $q;
+
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
+--echo # move depends on uninitialized value
+--echo #
+
+CREATE TABLE t1 (
+ pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
+
+SELECT * FROM t1 WHERE pk < 5;
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18956: Possible rowid filter for subquery for which
+--echo # in_to_exists strategy has been chosen
+--echo #
+
+CREATE TABLE t1 (pk int) engine=myisam ;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (
+ pk int auto_increment PRIMARY KEY,
+ i1 int, i2 int, c2 varchar(1),
+ KEY (i1), KEY (i2)
+) engine=myisam;
+
+INSERT INTO t2 VALUES
+ (1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
+ (6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
+ (11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
+
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+EXPLAIN EXTENDED
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-19255: rowid range filter built for range condition
+--echo # that uses an expensive subquery
+--echo #
+
+CREATE TABLE t1 (
+ pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
+(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
+(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
+(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
+(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
+(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
+(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
+(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
+(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
+(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
+(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
+(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
+(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
+(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
+(107,8,'z'),(108,3,'k'),(109,65,NULL);
+
+CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,1,'x');
+INSERT INTO t2 SELECT * FROM t1;
+
+let $q=
+SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+ WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+
+eval $q;
+eval EXPLAIN EXTENDED $q;
+--source include/explain-no-costs.inc
+eval EXPLAIN FORMAT=JSON $q;
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-21794: Optimizer flag rowid_filter leads to long query
+--echo #
+create table t10(a int);
+insert into t10 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t11(a int);
+insert into t11 select A.a + B.a* 10 + C.a * 100 from t10 A, t10 B, t10 C;
+
+CREATE TABLE t1 (
+ el_id int(10) unsigned NOT NULL ,
+ el_index blob NOT NULL,
+ el_index_60 varbinary(60) NOT NULL,
+ filler blob,
+
+ PRIMARY KEY (el_id),
+ KEY el_index (el_index(60)),
+ KEY el_index_60 (el_index_60,el_id)
+);
+
+insert into t1
+select
+ A.a+1000*B.a,
+ A.a+1000*B.a + 10000,
+ A.a+1000*B.a + 10000,
+ 'filler-data-filler-data'
+from
+ t11 A, t10 B;
+analyze table t1 persistent for all;
+
+--echo # This must not use rowid_filter with key=el_index|el_index_60:
+explain
+select * from t1
+where el_index like '10%' and (el_index_60 like '10%' or el_index_60 like '20%');
+
+drop table t10, t11, t1;
+
+
+--echo #
+--echo # MDEV-22160: SIGSEGV in st_join_table::save_explain_data on SELECT
+--echo #
+
+set @save_optimizer_switch= @@optimizer_switch;
+SET @@optimizer_switch="index_merge_sort_union=OFF";
+CREATE TABLE t1 (a INT, b INT, INDEX(a), INDEX(b));
+INSERT INTO t1 VALUES (0,0),(0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4),(3,3),(3,4),(3,5),(8,8),(8,9),(1,0),(2,0),(0,0),(0,0);
+explain
+SELECT * FROM t1 WHERE a > 0 AND b=0;
+SELECT * FROM t1 WHERE a > 0 AND b=0;
+drop table t1;
+SET @@optimizer_switch=@save_optimizer_switch;
+
+--echo #
+--echo # MDEV-21633
+--echo # Assertion `tmp >= 0' failed in best_access_path with rowid_filter=ON
+--echo #
+
+set @save_optimizer_switch= @@optimizer_switch;
+SET optimizer_switch='rowid_filter=on';
+
+CREATE TABLE t1 (
+ pk INT AUTO_INCREMENT,
+ a INT,
+ b VARCHAR(8),
+ KEY(a),
+ PRIMARY KEY(pk),
+ KEY (a,pk)
+) ENGINE=MyISAM;
+
+INSERT INTO t1 (a,b) VALUES
+ (NULL,'d'),(9,'b'),(2,'x'),(5,'k'),(NULL,'d'),(3,'s'),(5,'k'),(1,'r'),
+ (8,'l'),(3,'z'),(1,'c'),(1,'q'),(NULL,'x'),(NULL,'p'),(NULL,'z'),(7,'a'),
+ (0,'i'),(3,'s'),(NULL,'h'),(4,'p'),(1,'i'),(4,'f'),(1,'c'),(NULL,'a'),
+ (NULL,'x'),(1,'b'),(NULL,'n'),(NULL,'h'),(5,'i'),(6,'e'),(NULL,'i'),
+ (7,'e'),(1,'r'),(NULL,'z'),(1,'i'),(14,'c'),(6,'u'),(3,'b'),(4,'z'),
+ (2,'c'),(70,'d'),(NULL,'p'),(21,'j'),(6,'e'),(5,'c'),(13,'i'),(42,'d'),
+ (80,'s'),(14,'t'),(9,'a'),(0,'2'),(0,NULL),(0,NULL),(0,NULL),(0,''),
+ (0,''),(0,'1'),(0,''),(0,''),(0,''),(0,''),(0,NULL),(0,''),(0,''),(0,''),
+ (0,NULL),(0,''),(0,''),(0,''),(0,''),(0,''),(0,''),(0,NULL),(0,NULL),
+ (0,NULL),(0,''),(0,''),(0,''),(0,''),(0,NULL),(0,''),(0,NULL),(0,NULL),
+ (0,''),(0,''),(0,''),(0,NULL),(0,''),(0,NULL),(0,''),(0,''),(0,''),(0,''),
+ (0,''),(0,''),(0,''),(0,NULL),(0,''),(0,NULL),(0,'');
+
+CREATE TABLE t2 (c INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5),(6);
+
+SELECT * FROM t1 JOIN t2 WHERE a = c AND pk BETWEEN 4 AND 7 AND a BETWEEN 2 AND 12 AND b != 'foo';
+
+explain SELECT * FROM t1 JOIN t2 WHERE a = c AND pk BETWEEN 4 AND 7 AND a BETWEEN 2 AND 12 AND b != 'foo';
+
+SET optimizer_switch='rowid_filter=off';
+
+SELECT * FROM t1 JOIN t2 WHERE a = c AND pk BETWEEN 4 AND 7 AND a BETWEEN 2 AND 12 AND b != 'foo';
+
+SET @@optimizer_switch=@save_optimizer_switch;
+
+# Cleanup
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-28846: Poor performance when rowid filter contains no elements
+--echo #
+
+--source include/have_sequence.inc
+
+create table t1 (
+ pk int primary key auto_increment,
+ nm varchar(32),
+ fl1 tinyint default 0,
+ fl2 tinyint default 0,
+ index idx1(nm, fl1),
+ index idx2(fl2)
+) engine=myisam;
+
+create table name (
+ pk int primary key auto_increment,
+ nm bigint
+) engine=myisam;
+
+create table flag2 (
+ pk int primary key auto_increment,
+ fl2 tinyint
+) engine=myisam;
+
+insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
+insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
+
+insert into t1(nm,fl2)
+ select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+
+analyze table t1 persistent for all;
+
+let $a=
+`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`;
+eval select '$a' as a;
+
+set optimizer_switch='rowid_filter=on';
+eval
+explain
+select * from t1 where nm like '$a' AND fl2 = 0;
+--source include/analyze-format.inc
+eval
+analyze format=json
+select * from t1 where nm like '$a' AND fl2 = 0;
+eval
+select * from t1 where nm like '$a' AND fl2 = 0;
+
+truncate table name;
+truncate table flag2;
+truncate table t1;
+
+insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
+insert into flag2(fl2) select seq mod 2 from seq_1_to_1000 order by rand(19);
+
+insert into t1(nm,fl2)
+ select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+
+analyze table t1 persistent for all;
+
+set optimizer_switch='rowid_filter=off';
+eval
+explain
+select * from t1 where nm like '$a' AND fl2 = 0;
+--source include/analyze-format.inc
+eval
+analyze format=json
+select * from t1 where nm like '$a' AND fl2 = 0;
+eval
+select * from t1 where nm like '$a' AND fl2 = 0;
+
+truncate table name;
+truncate table flag2;
+truncate table t1;
+
+insert into name(nm) select seq from seq_1_to_1000 order by rand(17);
+insert into flag2(fl2) select seq mod 10 from seq_1_to_1000 order by rand(19);
+
+insert into t1(nm,fl2)
+ select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+
+analyze table t1 persistent for all;
+
+let $a=
+`select concat((select nm from t1 where fl2=0 order by RAND(13) limit 1),'%')`;
+eval select '$a' as a;
+
+set optimizer_switch='rowid_filter=on';
+eval
+explain
+select * from t1 where nm like '$a' AND fl2 = 0;
+eval
+select * from t1 where nm like '$a' AND fl2 = 0;
+
+truncate table name;
+truncate table flag2;
+truncate table t1;
+
+insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
+insert into flag2(fl2) select seq mod 100 from seq_1_to_10000 order by rand(19);
+
+insert into t1(nm,fl2)
+ select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+
+analyze table t1 persistent for all;
+
+let $a=
+`select concat(left((select nm from t1 where fl2=0 order by RAND(13) limit 1),2),'%')`;
+eval select '$a' as a;
+
+set optimizer_switch='rowid_filter=on';
+eval
+explain
+select * from t1 where nm like '$a' AND fl2 = 0;
+--source include/analyze-format.inc
+eval
+analyze format=json
+select * from t1 where nm like '$a' AND fl2 = 0;
+eval
+select * from t1 where nm like '$a' AND fl2 = 0;
+
+drop table name, flag2;
+drop table t1;
+
+# This test shows that if the container is empty there are no lookups into it
+
+create table t1 (
+ pk int primary key auto_increment,
+ nm char(255),
+ fl1 tinyint default 0,
+ fl2 int default 0,
+ index idx1(nm, fl1),
+ index idx2(fl2)
+) engine=myisam;
+
+create table name (
+ pk int primary key auto_increment,
+ nm bigint
+) engine=myisam;
+
+create table flag2 (
+ pk int primary key auto_increment,
+ fl2 int
+) engine=myisam;
+
+insert into name(nm) select seq from seq_1_to_10000 order by rand(17);
+insert into flag2(fl2) select seq mod 10 from seq_1_to_10000 order by rand(19);
+
+insert into t1(nm,fl2)
+ select nm, fl2 from name, flag2 where name.pk = flag2.pk;
+
+analyze table t1 persistent for all;
+
+let $q=
+select * from t1
+where
+(
+ nm like '3400%' or nm like '3402%' or nm like '3403%' or
+ nm like '3404%' or nm like '3405%' or nm like '3406%' or nm like '3407%' or
+ nm like '3409%' or
+ nm like '3411%' or nm like '3412%' or nm like '3413%' or
+ nm like '3414%' or nm like '3415%' or nm like '3416%' or nm like '3417%' or
+ nm like '3418%' or nm like '3419%' or
+ nm like '3421%' or nm like '3422%' or nm like '3423%' or
+ nm like '3424%' or nm like '3425%' or nm like '3426%' or nm like '3427%' or
+ nm like '3428%' or nm like '3429%' or
+ nm like '3430%' or nm like '3431%' or nm like '3432%' or nm like '3433%' or
+ nm like '3434%' or nm like '3435%' or nm like '3436%' or nm like '3437%' or
+ nm like '3439%' or
+ nm like '3440%' or nm like '3441%' or nm like '3442%' or nm like '3443%' or
+ nm like '3444%' or nm like '3445%' or nm like '3446%' or nm like '3447%' or
+ nm like '3448%'
+) and fl2 = 0;
+
+eval $q;
+--source include/analyze-format.inc
+eval analyze format=json $q;
+
+create table t0 select * from t1 where nm like '34%';
+delete from t1 using t1,t0 where t1.nm=t0.nm;
+--source include/analyze-format.inc
+eval analyze format=json $q;
+
+drop table t0;
+
+set optimizer_switch='rowid_filter=default';
+
+drop table name, flag2;
+drop table t1;
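# -- Editorial sketch, not part of the patch -------------------------------
# For readers new to the feature under test: a rowid filter is a container
# of rowids collected from one selective index range, probed to prune the
# row lookups made through another index (the ref|filter plans above).  A
# minimal self-contained way to provoke such a plan, assuming a server with
# the sequence engine; the table and column names below are illustrative
# only and do not come from the patch, and the optimizer may pick another
# plan if the data distribution differs:
create table demo (
  pk int primary key auto_increment,
  a int,
  b int,
  key(a), key(b)
) engine=myisam;
insert into demo(a,b) select seq mod 50, seq mod 1000 from seq_1_to_10000;
analyze table demo persistent for all;
set optimizer_switch='rowid_filter=on';
# With ~200 rows per value of a and ~3% of rows in the b range, EXPLAIN is
# expected to show type ref|filter, key a|b and "Using rowid filter",
# mirroring the recorded plans above (exact estimates will differ).
explain select * from demo where a = 7 and b between 1 and 30;
drop table demo;
set optimizer_switch='rowid_filter=default';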
diff --git a/mysql-test/main/rowid_filter_myisam_debug.result b/mysql-test/main/rowid_filter_myisam_debug.result
index 75a8fad6947..263a8e9970d 100644
--- a/mysql-test/main/rowid_filter_myisam_debug.result
+++ b/mysql-test/main/rowid_filter_myisam_debug.result
@@ -2,7 +2,7 @@
# MDEV-22761 KILL QUERY during rowid_filter, crashes
#
create table t2(a int);
-insert into t2 select * from seq_0_to_99;
+insert into t2 select seq from seq_1_to_100;
CREATE TABLE t3 (
key1 int ,
key2 int,
@@ -10,16 +10,11 @@ filler varchar(255),
KEY (key1),
KEY (key2)
);
+insert into t3 select seq,seq, 'filler-data-filler-data' from seq_1_to_2000;
select engine from information_schema.tables
where table_schema=database() and table_name='t3';
engine
MyISAM
-insert into t3
-select
-A.seq,
-B.seq,
-'filler-data-filler-data'
-from seq_0_to_99 A, seq_0_to_99 B;
analyze table t2,t3;
Table Op Msg_type Msg_text
test.t2 analyze status Engine-independent statistics collected
@@ -27,16 +22,16 @@ test.t2 analyze status OK
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
explain
-select * from t2, t3
+select straight_join * from t2, t3
where
-t3.key1=t2.a and t3.key2 in (2,3);
+t3.key1=t2.a and t3.key2 between 2 and 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using where
-1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 100 (2%) Using where; Using rowid filter
+1 SIMPLE t3 ref|filter key1,key2 key1|key2 5|5 test.t2.a 1 (0%) Using where; Using rowid filter
set debug_sync='handler_rowid_filter_check SIGNAL at_rowid_filter_check WAIT_FOR go';
-select * from t2, t3
+select straight_join * from t2, t3
where
-t3.key1=t2.a and t3.key2 in (2,3);
+t3.key1=t2.a and t3.key2 between 2 and 10;
connect con1, localhost, root,,;
set debug_sync='now WAIT_FOR at_rowid_filter_check';
kill query $target_id;
diff --git a/mysql-test/main/rpl_mysql_upgrade_slave_repo_check.test b/mysql-test/main/rpl_mysql_upgrade_slave_repo_check.test
index 24b5f029e8d..f91cbaa36ea 100644
--- a/mysql-test/main/rpl_mysql_upgrade_slave_repo_check.test
+++ b/mysql-test/main/rpl_mysql_upgrade_slave_repo_check.test
@@ -96,7 +96,7 @@ EOF
--connection master
let $datadir= `select @@datadir`;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
TRUNCATE TABLE `mysql`.`slave_master_info`;
TRUNCATE TABLE `mysql`.`slave_relay_log_info`;
--remove_file $MYSQLTEST_VARDIR/log/mysql_upgrade_master.log
@@ -117,7 +117,7 @@ TRUNCATE TABLE `mysql`.`slave_relay_log_info`;
--echo "====== Clean up ======"
--connection master
let $datadir= `select @@datadir`;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
DROP TABLE `mysql`.`slave_master_info`, `mysql`.`slave_relay_log_info`;
--remove_file $MYSQLTEST_VARDIR/tmp/slave_table_repo_init.sql
diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result
index 99844d05f2a..4f7afe933ff 100644
--- a/mysql-test/main/select.result
+++ b/mysql-test/main/select.result
@@ -603,6 +603,31 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some test with ORDER BY and limit
+#
+explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index period period 4 NULL 41810 Using index
+1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using index
+explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -615,6 +640,10 @@ explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period l
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index period period 4 NULL 1
1 SIMPLE t3 ref period period 4 test.t1.period 4181
+set @@join_cache_level=@save_join_cache_level;
+#
+# Search with a constant table.
+#
select period from t1;
period
9410
@@ -1378,18 +1407,28 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
+set @@join_cache_level=@save_join_cache_level;
+explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where; Using join buffer (flat, BNL join)
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
@@ -1426,6 +1465,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
+#
+# Joins with forms.
+#
select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
companynr companynr
37 36
@@ -2364,16 +2406,16 @@ insert into t1 values (1,2), (2,2), (3,2), (4,2);
insert into t2 values (1,3), (2,3), (3,4), (4,4);
explain select * from t1 left join t2 on a=c where d in (4);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c,d d 5 const 2
-1 SIMPLE t1 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref c,d d 5 const 2 Using where
+1 SIMPLE t1 ref a a 5 test.t2.c 1
select * from t1 left join t2 on a=c where d in (4);
a b c d
3 2 3 4
4 2 4 4
explain select * from t1 left join t2 on a=c where d = 4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c,d d 5 const 2
-1 SIMPLE t1 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref c,d d 5 const 2 Using where
+1 SIMPLE t1 ref a a 5 test.t2.c 1
select * from t1 left join t2 on a=c where d = 4;
a b c d
3 2 3 4
@@ -2400,11 +2442,11 @@ INSERT INTO t2 VALUES ('one'),('two'),('three'),('four'),('five');
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 USE INDEX (a) ON t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref a a 23 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 23 test.t1.a 1 Using where
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 FORCE INDEX (a) ON t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref a a 23 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 23 test.t1.a 1 Using where
DROP TABLE t1, t2;
CREATE TABLE t1 ( city char(30) );
INSERT INTO t1 VALUES ('London');
@@ -3597,7 +3639,7 @@ CREATE TABLE t1(id int PRIMARY KEY, b int, e int);
CREATE TABLE t2(i int, a int, INDEX si(i), INDEX ai(a));
CREATE TABLE t3(a int PRIMARY KEY, c char(4), INDEX ci(c));
INSERT INTO t1 VALUES
-(1,10,19), (2,20,22), (4,41,42), (9,93,95), (7, 77,79),
+(1,10,19), (2,20,22), (4,41,42), (9,39,95), (7, 77,79),
(6,63,67), (5,55,58), (3,38,39), (8,81,89);
INSERT INTO t2 VALUES
(21,210), (41,410), (82,820), (83,830), (84,840),
@@ -3625,6 +3667,14 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
+EXPLAIN
+SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
+WHERE t1.id = 9 AND t2.i BETWEEN t1.b AND t1.e AND
+t3.a=t2.a AND t3.c IN ('bb','ee');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
+1 SIMPLE t2 range si si 5 NULL 13 Using index condition; Using where
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3632,7 +3682,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where
-1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
+1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
EXPLAIN
SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3640,7 +3690,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where
-1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
+1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
DROP TABLE t1,t2,t3;
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
CREATE TABLE t2 ( f11 int PRIMARY KEY );
@@ -3717,7 +3767,7 @@ COUNT(*)
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 4|10 const 2 (6%) Using where; Using rowid filter
+1 SIMPLE t1 index_merge idx1,idx2 idx2,idx1 4,10 NULL 1 Using intersect(idx2,idx1); Using where; Using index
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null=3 IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -3731,7 +3781,7 @@ CREATE UNIQUE INDEX idx1 ON t1(ID1_with_null,ID2_with_null);
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 4|10 const 2 (7%) Using where; Using rowid filter
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -3744,7 +3794,7 @@ EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND
(ID2_with_null=1 OR ID2_with_null=2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
+1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter
DROP TABLE t1;
CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts));
INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00");
@@ -3876,7 +3926,7 @@ cc 3 7
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref name name 6 test.t1.name 2 Using where
+1 SIMPLE t2 ref name name 6 test.t1.name 1 Using where
SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
name name n
ccc NULL NULL
@@ -3969,7 +4019,7 @@ cc 3 7
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref name name 6 test.t1.name 2 Using where
+1 SIMPLE t2 ref name name 6 test.t1.name 1 Using where
SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
name name n
ccc NULL NULL
diff --git a/mysql-test/main/select.test b/mysql-test/main/select.test
index 332b5a36aea..bc06431a05a 100644
--- a/mysql-test/main/select.test
+++ b/mysql-test/main/select.test
@@ -1462,17 +1462,26 @@ select distinct fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2nr orde
explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2nr order by t3.t2nr,fld3;
-#
-# Some test with ORDER BY and limit
-#
+--echo #
+--echo # Some test with ORDER BY and limit
+--echo #
+
+explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
+set @@join_cache_level=@save_join_cache_level;
-#
-# Search with a constant table.
-#
+--echo #
+--echo # Search with a constant table.
+--echo #
select period from t1;
select period from t1 where period=1900;
@@ -1533,7 +1542,6 @@ INSERT INTO t4 (companynr, companyname) VALUES (68,'company 10');
INSERT INTO t4 (companynr, companyname) VALUES (50,'company 11');
INSERT INTO t4 (companynr, companyname) VALUES (00,'Unknown');
--enable_query_log
-
#
# Test of stright join to force a full join.
#
@@ -1565,12 +1573,17 @@ explain select companynr,companyname from t2 left join t4 using (companynr) wher
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr is null;
delete from t2 where fld1=999999;
-#
-# Test left join optimization
+--echo #
+--echo # Test left join optimization
+--echo #
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0;
+set @@join_cache_level=@save_join_cache_level;
+explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
@@ -1584,9 +1597,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
-#
-# Joins with forms.
-#
+--echo #
+--echo # Joins with forms.
+--echo #
select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
explain select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
@@ -3141,7 +3154,7 @@ CREATE TABLE t2(i int, a int, INDEX si(i), INDEX ai(a));
CREATE TABLE t3(a int PRIMARY KEY, c char(4), INDEX ci(c));
INSERT INTO t1 VALUES
- (1,10,19), (2,20,22), (4,41,42), (9,93,95), (7, 77,79),
+ (1,10,19), (2,20,22), (4,41,42), (9,39,95), (7, 77,79),
(6,63,67), (5,55,58), (3,38,39), (8,81,89);
INSERT INTO t2 VALUES
(21,210), (41,410), (82,820), (83,830), (84,840),
@@ -3163,6 +3176,11 @@ SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
t3.a=t2.a AND t3.c IN ('bb','ee') ;
+EXPLAIN
+SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
+ WHERE t1.id = 9 AND t2.i BETWEEN t1.b AND t1.e AND
+ t3.a=t2.a AND t3.c IN ('bb','ee');
+
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result
index a19e98d49a3..177edbc4acf 100644
--- a/mysql-test/main/select_jcl6.result
+++ b/mysql-test/main/select_jcl6.result
@@ -614,18 +614,47 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some test with ORDER BY and limit
+#
+explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index period period 4 NULL 41810 Using index
+1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using index
+explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
-1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
-1 SIMPLE t1 ref period period 4 test.t3.period 4181 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
-1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
+1 SIMPLE t3 ref period period 4 test.t1.period 4181
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 index period period 4 NULL 1
+1 SIMPLE t1 ref period period 4 test.t3.period 4181
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index period period 4 NULL 1
+1 SIMPLE t3 ref period period 4 test.t1.period 4181
+set @@join_cache_level=@save_join_cache_level;
+#
+# Search with a constant table.
+#
select period from t1;
period
9410
@@ -1389,18 +1418,28 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+set @@join_cache_level=@save_join_cache_level;
+explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t2 hash_ALL NULL #hash#$hj 1 test.t4.companynr 1199 Using where; Using join buffer (flat, BNLH join)
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
@@ -1437,6 +1476,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where
1 SIMPLE t2 hash_ALL NULL #hash#$hj 1 test.t4.companynr 1199 Using where; Using join buffer (flat, BNLH join)
+#
+# Joins with forms.
+#
select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
companynr companynr
37 36
@@ -2375,16 +2417,16 @@ insert into t1 values (1,2), (2,2), (3,2), (4,2);
insert into t2 values (1,3), (2,3), (3,4), (4,4);
explain select * from t1 left join t2 on a=c where d in (4);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c,d d 5 const 2
-1 SIMPLE t1 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref c,d d 5 const 2 Using where
+1 SIMPLE t1 ref a a 5 test.t2.c 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
select * from t1 left join t2 on a=c where d in (4);
a b c d
3 2 3 4
4 2 4 4
explain select * from t1 left join t2 on a=c where d = 4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c,d d 5 const 2
-1 SIMPLE t1 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref c,d d 5 const 2 Using where
+1 SIMPLE t1 ref a a 5 test.t2.c 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
select * from t1 left join t2 on a=c where d = 4;
a b c d
3 2 3 4
@@ -2411,11 +2453,11 @@ INSERT INTO t2 VALUES ('one'),('two'),('three'),('four'),('five');
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 USE INDEX (a) ON t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref a a 23 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 23 test.t1.a 1 Using where
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 FORCE INDEX (a) ON t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref a a 23 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 23 test.t1.a 1 Using where
DROP TABLE t1, t2;
CREATE TABLE t1 ( city char(30) );
INSERT INTO t1 VALUES ('London');
@@ -3608,7 +3650,7 @@ CREATE TABLE t1(id int PRIMARY KEY, b int, e int);
CREATE TABLE t2(i int, a int, INDEX si(i), INDEX ai(a));
CREATE TABLE t3(a int PRIMARY KEY, c char(4), INDEX ci(c));
INSERT INTO t1 VALUES
-(1,10,19), (2,20,22), (4,41,42), (9,93,95), (7, 77,79),
+(1,10,19), (2,20,22), (4,41,42), (9,39,95), (7, 77,79),
(6,63,67), (5,55,58), (3,38,39), (8,81,89);
INSERT INTO t2 VALUES
(21,210), (41,410), (82,820), (83,830), (84,840),
@@ -3636,6 +3678,14 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
+EXPLAIN
+SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
+WHERE t1.id = 9 AND t2.i BETWEEN t1.b AND t1.e AND
+t3.a=t2.a AND t3.c IN ('bb','ee');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
+1 SIMPLE t3 range PRIMARY,ci ci 5 NULL 6 Using index condition; Rowid-ordered scan
+1 SIMPLE t2 hash_range si #hash#$hj:si 5:5 test.t3.a 13 Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3643,7 +3693,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan
-1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
+1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
EXPLAIN
SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3651,7 +3701,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan
-1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
+1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
DROP TABLE t1,t2,t3;
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
CREATE TABLE t2 ( f11 int PRIMARY KEY );
@@ -3728,7 +3778,7 @@ COUNT(*)
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 4|10 const 2 (6%) Using where; Using rowid filter
+1 SIMPLE t1 index_merge idx1,idx2 idx2,idx1 4,10 NULL 1 Using intersect(idx2,idx1); Using where; Using index
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null=3 IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -3742,7 +3792,7 @@ CREATE UNIQUE INDEX idx1 ON t1(ID1_with_null,ID2_with_null);
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 4|10 const 2 (7%) Using where; Using rowid filter
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -3755,7 +3805,7 @@ EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND
(ID2_with_null=1 OR ID2_with_null=2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
+1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter
DROP TABLE t1;
CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts));
INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00");
@@ -3887,7 +3937,7 @@ cc 3 7
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref name name 6 test.t1.name 2 Using where
+1 SIMPLE t2 ref name name 6 test.t1.name 1 Using where
SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
name name n
ccc NULL NULL
@@ -3980,7 +4030,7 @@ cc 3 7
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref name name 6 test.t1.name 2 Using where
+1 SIMPLE t2 ref name name 6 test.t1.name 1 Using where
SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
name name n
ccc NULL NULL
diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result
index 99844d05f2a..4f7afe933ff 100644
--- a/mysql-test/main/select_pkeycache.result
+++ b/mysql-test/main/select_pkeycache.result
@@ -603,6 +603,31 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some test with ORDER BY and limit
+#
+explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index period period 4 NULL 41810 Using index
+1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using index
+explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using temporary; Using filesort
+1 SIMPLE t3 ALL period NULL NULL NULL 41810 Using where; Using join buffer (flat, BNL join)
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -615,6 +640,10 @@ explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period l
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index period period 4 NULL 1
1 SIMPLE t3 ref period period 4 test.t1.period 4181
+set @@join_cache_level=@save_join_cache_level;
+#
+# Search with a constant table.
+#
select period from t1;
period
9410
@@ -1378,18 +1407,28 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
+set @save_join_cache_level=@@join_cache_level;
+set @@join_cache_level=0;
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
+set @@join_cache_level=@save_join_cache_level;
+explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where; Using join buffer (flat, BNL join)
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
@@ -1426,6 +1465,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
+#
+# Joins with forms.
+#
select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
companynr companynr
37 36
@@ -2364,16 +2406,16 @@ insert into t1 values (1,2), (2,2), (3,2), (4,2);
insert into t2 values (1,3), (2,3), (3,4), (4,4);
explain select * from t1 left join t2 on a=c where d in (4);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c,d d 5 const 2
-1 SIMPLE t1 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref c,d d 5 const 2 Using where
+1 SIMPLE t1 ref a a 5 test.t2.c 1
select * from t1 left join t2 on a=c where d in (4);
a b c d
3 2 3 4
4 2 4 4
explain select * from t1 left join t2 on a=c where d = 4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref c,d d 5 const 2
-1 SIMPLE t1 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref c,d d 5 const 2 Using where
+1 SIMPLE t1 ref a a 5 test.t2.c 1
select * from t1 left join t2 on a=c where d = 4;
a b c d
3 2 3 4
@@ -2400,11 +2442,11 @@ INSERT INTO t2 VALUES ('one'),('two'),('three'),('four'),('five');
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 USE INDEX (a) ON t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref a a 23 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 23 test.t1.a 1 Using where
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 FORCE INDEX (a) ON t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref a a 23 test.t1.a 2 Using where
+1 SIMPLE t2 ref a a 23 test.t1.a 1 Using where
DROP TABLE t1, t2;
CREATE TABLE t1 ( city char(30) );
INSERT INTO t1 VALUES ('London');
@@ -3597,7 +3639,7 @@ CREATE TABLE t1(id int PRIMARY KEY, b int, e int);
CREATE TABLE t2(i int, a int, INDEX si(i), INDEX ai(a));
CREATE TABLE t3(a int PRIMARY KEY, c char(4), INDEX ci(c));
INSERT INTO t1 VALUES
-(1,10,19), (2,20,22), (4,41,42), (9,93,95), (7, 77,79),
+(1,10,19), (2,20,22), (4,41,42), (9,39,95), (7, 77,79),
(6,63,67), (5,55,58), (3,38,39), (8,81,89);
INSERT INTO t2 VALUES
(21,210), (41,410), (82,820), (83,830), (84,840),
@@ -3625,6 +3667,14 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
+EXPLAIN
+SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
+WHERE t1.id = 9 AND t2.i BETWEEN t1.b AND t1.e AND
+t3.a=t2.a AND t3.c IN ('bb','ee');
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
+1 SIMPLE t2 range si si 5 NULL 13 Using index condition; Using where
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3632,7 +3682,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where
-1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
+1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
EXPLAIN
SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3640,7 +3690,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where
-1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
+1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
DROP TABLE t1,t2,t3;
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
CREATE TABLE t2 ( f11 int PRIMARY KEY );
@@ -3717,7 +3767,7 @@ COUNT(*)
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 4|10 const 2 (6%) Using where; Using rowid filter
+1 SIMPLE t1 index_merge idx1,idx2 idx2,idx1 4,10 NULL 1 Using intersect(idx2,idx1); Using where; Using index
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null=3 IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -3731,7 +3781,7 @@ CREATE UNIQUE INDEX idx1 ON t1(ID1_with_null,ID2_with_null);
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref|filter idx1,idx2 idx2|idx1 4|10 const 2 (7%) Using where; Using rowid filter
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -3744,7 +3794,7 @@ EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND
(ID2_with_null=1 OR ID2_with_null=2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
+1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter
DROP TABLE t1;
CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts));
INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00");
@@ -3876,7 +3926,7 @@ cc 3 7
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref name name 6 test.t1.name 2 Using where
+1 SIMPLE t2 ref name name 6 test.t1.name 1 Using where
SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
name name n
ccc NULL NULL
@@ -3969,7 +4019,7 @@ cc 3 7
EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
-1 SIMPLE t2 ref name name 6 test.t1.name 2 Using where
+1 SIMPLE t2 ref name name 6 test.t1.name 1 Using where
SELECT * FROM t1 LEFT JOIN t2 ON t1.name=t2.name;
name name n
ccc NULL NULL
diff --git a/mysql-test/main/select_safe.result b/mysql-test/main/select_safe.result
index 649e2dc484e..a16d5439f48 100644
--- a/mysql-test/main/select_safe.result
+++ b/mysql-test/main/select_safe.result
@@ -67,6 +67,7 @@ test.t1 analyze status OK
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
insert into t1 values (null,"b"),(null,"b"),(null,"c"),(null,"c"),(null,"d"),(null,"d"),(null,"e"),(null,"e"),(null,"a"),(null,"e");
insert into t1 values (null,"x"),(null,"x"),(null,"y"),(null,"y"),(null,"z"),(null,"z"),(null,"v"),(null,"v"),(null,"a"),(null,"v");
+set @@optimizer_where_cost=0.3;
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL b NULL NULL NULL 11
diff --git a/mysql-test/main/select_safe.test b/mysql-test/main/select_safe.test
index c76e337cd10..b9788cf2eb2 100644
--- a/mysql-test/main/select_safe.test
+++ b/mysql-test/main/select_safe.test
@@ -60,6 +60,7 @@ analyze table t1;
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
insert into t1 values (null,"b"),(null,"b"),(null,"c"),(null,"c"),(null,"d"),(null,"d"),(null,"e"),(null,"e"),(null,"a"),(null,"e");
insert into t1 values (null,"x"),(null,"x"),(null,"y"),(null,"y"),(null,"z"),(null,"z"),(null,"v"),(null,"v"),(null,"a"),(null,"v");
+set @@optimizer_where_cost=0.3;
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
set MAX_SEEKS_FOR_KEY=1;
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index 7e8a4aeb083..54c39d090d2 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -52,7 +52,7 @@ part, supplier, partsupp, nation, region
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
-and p_size = 9
+and (p_size = 9 or p_size =19999)
and p_type like '%TIN'
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
@@ -72,18 +72,18 @@ and r_name = 'ASIA'
order by
s_acctbal desc, n_name, s_name, p_partkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 0.31 Using where; Using temporary; Using filesort
-1 PRIMARY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_partkey 4 dbt3_s001.part.p_partkey 3 100.00 Using where
+1 PRIMARY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where; Using temporary; Using filesort
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 0.63 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
1 PRIMARY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
1 PRIMARY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
-1 PRIMARY region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 20.00 Using where
-2 DEPENDENT SUBQUERY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where
-2 DEPENDENT SUBQUERY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_partkey 4 dbt3_s001.part.p_partkey 3 100.00
+2 DEPENDENT SUBQUERY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00
2 DEPENDENT SUBQUERY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
2 DEPENDENT SUBQUERY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
+2 DEPENDENT SUBQUERY region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 20.00 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.part.p_partkey' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`part`.`p_size` = 9 and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`region`.`r_regionkey` = `dbt3_s001`.`nation`.`n_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
+Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and (`dbt3_s001`.`part`.`p_size` = 9 or `dbt3_s001`.`part`.`p_size` = 19999) and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`region`.`r_regionkey` = `dbt3_s001`.`nation`.`n_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
set optimizer_use_condition_selectivity=4;
explain extended
select
@@ -118,13 +118,13 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 2.08 Using where; Using join buffer (flat, BNL join)
1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 100.00 Using where
-2 DEPENDENT SUBQUERY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where
-2 DEPENDENT SUBQUERY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_partkey 4 dbt3_s001.part.p_partkey 3 100.00
+2 DEPENDENT SUBQUERY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00
2 DEPENDENT SUBQUERY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
2 DEPENDENT SUBQUERY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
+2 DEPENDENT SUBQUERY region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 20.00 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.part.p_partkey' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`part`.`p_size` = 9 and `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
+Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`part`.`p_size` = 9 and `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`region`.`r_regionkey` = `dbt3_s001`.`nation`.`n_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
=== Q15 ===
create view revenue0 (supplier_no, total_revenue) as
select l_suppkey, sum(l_extendedprice * (1 - l_discount))
@@ -490,7 +490,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 100.00 Using where
2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_partkey 4 dbt3_s001.part.p_partkey 3 100.00 Using where
-4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 100.00 Using where
+4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 15.14 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2
@@ -541,8 +541,8 @@ limit 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
-1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 4.17 Using where; Start temporary; Using join buffer (flat, BNL join)
-1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 100.00 Using where; End temporary
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 4.17 Using where
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 11.99 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 15.14 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
@@ -597,7 +597,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 7.03 Using where
-1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 100.00 Using where; FirstMatch(supplier)
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 7.11 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 15.14 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
@@ -652,7 +652,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 7.81 Using where
-1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 100.00 Using where; FirstMatch(supplier)
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 6.40 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 15.14 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
@@ -707,7 +707,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 7.81 Using where
-1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 100.00 Using where; FirstMatch(supplier)
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 6.40 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 15.14 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
@@ -755,7 +755,7 @@ EXPLAIN EXTENDED
SELECT * FROM v1 INNER JOIN t2 ON ( a = c AND b = d );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t2 ref idx idx 5 test.t1.b 2 100.00 Using where
+1 SIMPLE t2 ref idx idx 5 test.t1.b 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`c` = `test`.`t1`.`a` and `test`.`t2`.`d` = `test`.`t1`.`b`
SELECT * FROM v1 INNER JOIN t2 ON ( a = c AND b = d );
@@ -1641,27 +1641,51 @@ drop function f1;
#
create table t1 (a int, b int, key (b), key (a));
insert into t1
-select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000;
-analyze table t1 persistent for all;
+select (rand(1)*1000)/30, (rand(1001)*1000)/40 from seq_1_to_1000;
+analyze table t1 ;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+select count(*) from t1 where b=2;
+count(*)
+42
+select count(*) from t1 where a in (17,51,5);
+count(*)
+62
# Check what info the optimizer has about selectivities
explain extended select * from t1 use index () where a in (17,51,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 2.90 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 6.20 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`a` in (17,51,5)
explain extended select * from t1 use index () where b=2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 2.40 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 4.20 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`b` = 2
# Now, the equality is used for ref access, while the range condition
# gives selectivity data
explain extended select * from t1 where a in (17,51,5) and b=2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ref b,a b 5 const 24 2.90 Using where
+1 SIMPLE t1 ref|filter b,a b|a 5|5 const 42 (6%) 6.30 Using where; Using rowid filter
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (17,51,5)
+truncate table t1;
+insert into t1
+select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000;
+analyze table t1 ;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select count(*) from t1 where b=2;
+count(*)
+59
+select count(*) from t1 where a in (17,51,5);
+count(*)
+29
+explain extended select * from t1 where a in (17,51,5) and b=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range|filter b,a a|b 5|5 NULL 29 (6%) 5.80 Using index condition; Using where; Using rowid filter
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (17,51,5)
drop table t1;
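Side note on the new expected values in the hunk above (an illustration, not part of the patch): t1 holds 1000 rows, so once the engine-independent statistics are collected the filtered percentages are simply the counted frequencies as fractions of the table, assuming the statistics describe the data exactly: 42/1000 = 4.20% for b=2 and 62/1000 = 6.20% for a in (17,51,5). The new "ref|filter ... 42 (6%)" plan line reads as ref access on index b, about 42 rows, combined with a rowid filter built from index a that is expected to let through roughly 6% of them. A minimal way to check the arithmetic against the counts shown above:

select count(*) * 100.0 / 1000 from t1 where b = 2;           # 42 rows  -> filtered 4.20
select count(*) * 100.0 / 1000 from t1 where a in (17,51,5);  # 62 rows  -> filtered 6.20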
@@ -1790,7 +1814,7 @@ set optimizer_use_condition_selectivity=2;
explain extended select t1.b,t2.a,t3.a,t3.b from t1,t2,t3
where t1.c = t2.a AND t1.d = t3.a and t1.a = 50 and t1.b <= 100;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range a a 10 NULL 9 9.00 Using index condition; Using where
+1 SIMPLE t1 range a a 10 NULL 9 100.00 Using index condition; Using where
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00 Using index
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.d 1 100.00
Warnings:
@@ -1808,6 +1832,18 @@ b a a b
7 7 8 8
8 8 9 9
9 9 10 10
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain extended select t1.b,t2.a,t3.a,t3.b from t1,t2,t3
+where t1.c = t2.a AND t1.d = t3.a and t1.a = 50 and t1.b <= 100;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range a a 10 NULL 9 100.00 Using index condition; Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00 Using index
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.d 1 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a` = `test`.`t1`.`c` and `test`.`t3`.`a` = `test`.`t1`.`d` and `test`.`t1`.`a` = 50 and `test`.`t1`.`b` <= 100
set optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
drop table t1,t2,t3;
#
@@ -1947,3 +1983,85 @@ set use_stat_tables= @save_use_stat_tables;
DROP TABLE t1;
# End of 10.2 tests
set @@global.histogram_size=@save_histogram_size;
+#
+# MDEV-20595
+# Assertion `0 < sel && sel <= 2.0' failed in table_cond_selectivity
+#
+create table t1 (id int, a int, PRIMARY KEY(id), key(a));
+insert into t1 select seq,seq from seq_1_to_100;
+create table t2 (id int, a int, b int, PRIMARY KEY(id), key(a), key(b));
+insert into t2 select seq,seq,seq from seq_1_to_100;
+set optimizer_use_condition_selectivity=2;
+EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1
+1 SIMPLE B ref a a 5 const 1
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+drop table t1,t2;
+#
+# MDEV-30360 Assertion `cond_selectivity <= 1.000000001' failed in get_range_limit_read_cost
+# with LIMIT .. OFFSET
+#
+CREATE TABLE t1 (a INT, b VARCHAR(1), KEY(b), KEY(a)) engine=myisam;
+INSERT INTO t1 VALUES
+(3,'a'),(2,'g'),(5,'v'),(9,'n'),(6,'u'),
+(7,'s'),(0,'z'),(3,'z'),(NULL,'m'),(6,'r');
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+SELECT STRAIGHT_JOIN pk FROM t1 JOIN t2 ON a = pk WHERE b >= 'A' ORDER BY t2.pk LIMIT 8 OFFSET 1;
+pk
+DROP TABLE t1, t2;
+#
+# MDEV-30659 Server crash on EXPLAIN SELECT/SELECT on table with
+# engine Aria for LooseScan Strategy
+#
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+c2 integer, c3 integer) engine=aria;
+insert into t1(c1,c2,c3)
+values (1,1,1), (1,2,2), (1,3,3),
+(2,1,4), (2,2,5), (2,3,6),
+(2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and
+c2 >= 3 order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a range t1_c2 t1_c2 5 NULL 5 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 1
+drop table t1;
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+c2 integer, c3 integer) engine=aria;
+create trigger trg_t1 before update on t1 for each row
+begin
+set new.old_c1=old.c1;
+set new.old_c2=old.c2;
+end;
+/
+insert into t1 (c1,c2,c3) values
+(1,1,1), (1,2,2), (1,3,3), (2,1,4), (2,2,5), (2,3,6), (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+create table t2 as select * from t1;
+truncate table t1;
+insert into t1 select * from t2;
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and c2 >= 3 order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a range t1_c2 t1_c2 5 NULL 5 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 1
+drop trigger trg_t1;
+drop table t1,t2;
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+c2 integer, c3 integer) engine=aria;
+insert into t1 (c1,c2,c3) values
+(1,1,1), (1,2,2), (1,3,3), (2,1,4), (2,2,5), (2,3,6), (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+create table t2 as select * from t1;
+truncate table t1;
+insert into t1 select * from t2;
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and c2 >= 3 order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a range t1_c2 t1_c2 5 NULL 5 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 2
+drop table t1,t2;
diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test
index 4e4513d09d6..6957773fc8e 100644
--- a/mysql-test/main/selectivity.test
+++ b/mysql-test/main/selectivity.test
@@ -67,7 +67,9 @@ customer, lineitem, nation, orders, part, partsupp, region, supplier;
--enable_query_log
--echo === Q2 ===
-
+# "or p_size =19999" is added to avoid symmetry between
+# region (5 rows * 20% selectivity) = 1 and
+# part (200 rows * 0.5% selectivity) = 1
set optimizer_use_condition_selectivity=5;
explain extended
select
@@ -77,7 +79,7 @@ from
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
- and p_size = 9
+ and (p_size = 9 or p_size =19999)
and p_type like '%TIN'
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
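Side note on the arithmetic behind this comment (a sketch, not part of the patch): with p_size = 9 alone the two candidate first tables get symmetric estimates, region = 5 rows * 20% = 1 row and part = 200 rows * 0.5% = 1 row, so the order between them comes down to an arbitrary tie-break. The extra p_size = 19999 disjunct presumably matches no row, so it only changes the estimate; the updated plans further down show part at about 0.63%, i.e. roughly 200 * 0.0063 = 1.3 rows, and the tie is gone. Taking the filtered values at face value:

region: 5 rows   * 20.00%  (r_name = 'ASIA')              ~= 1 row
part:   200 rows *  0.50%  (p_size = 9)                   ~= 1 row    (tie)
part:   200 rows *  0.63%  (p_size = 9 or p_size = 19999) ~= 1.3 rows (tie broken)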
@@ -1111,8 +1113,10 @@ drop function f1;
--echo #
create table t1 (a int, b int, key (b), key (a));
insert into t1
-select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000;
-analyze table t1 persistent for all;
+select (rand(1)*1000)/30, (rand(1001)*1000)/40 from seq_1_to_1000;
+analyze table t1 ;
+select count(*) from t1 where b=2;
+select count(*) from t1 where a in (17,51,5);
--echo # Check what info the optimizer has about selectivities
explain extended select * from t1 use index () where a in (17,51,5);
@@ -1121,6 +1125,13 @@ explain extended select * from t1 use index () where b=2;
--echo # Now, the equality is used for ref access, while the range condition
--echo # gives selectivity data
explain extended select * from t1 where a in (17,51,5) and b=2;
+truncate table t1;
+insert into t1
+select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000;
+analyze table t1 ;
+select count(*) from t1 where b=2;
+select count(*) from t1 where a in (17,51,5);
+explain extended select * from t1 where a in (17,51,5) and b=2;
drop table t1;
set use_stat_tables= @save_use_stat_tables;
@@ -1232,17 +1243,16 @@ eval $query;
set optimizer_use_condition_selectivity=2;
eval explain extended $query;
eval $query;
+analyze table t1;
+eval explain extended $query;
set optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
drop table t1,t2,t3;
-
--echo #
--echo # MDEV-20519: Query plan regression with optimizer_use_condition_selectivity=4
--echo #
-
-
create table t1 (id int, a int, PRIMARY KEY(id), key(a));
insert into t1 select seq,seq from seq_1_to_100;
@@ -1334,3 +1344,86 @@ DROP TABLE t1;
#
--source include/restore_charset.inc
set @@global.histogram_size=@save_histogram_size;
+
+--echo #
+--echo # MDEV-20595
+--echo # Assertion `0 < sel && sel <= 2.0' failed in table_cond_selectivity
+--echo #
+
+create table t1 (id int, a int, PRIMARY KEY(id), key(a));
+insert into t1 select seq,seq from seq_1_to_100;
+create table t2 (id int, a int, b int, PRIMARY KEY(id), key(a), key(b));
+insert into t2 select seq,seq,seq from seq_1_to_100;
+
+set optimizer_use_condition_selectivity=2;
+EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65;
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-30360 Assertion `cond_selectivity <= 1.000000001' failed in get_range_limit_read_cost
+--echo # with LIMIT .. OFFSET
+--echo #
+
+CREATE TABLE t1 (a INT, b VARCHAR(1), KEY(b), KEY(a)) engine=myisam;
+INSERT INTO t1 VALUES
+(3,'a'),(2,'g'),(5,'v'),(9,'n'),(6,'u'),
+(7,'s'),(0,'z'),(3,'z'),(NULL,'m'),(6,'r');
+
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+
+SELECT STRAIGHT_JOIN pk FROM t1 JOIN t2 ON a = pk WHERE b >= 'A' ORDER BY t2.pk LIMIT 8 OFFSET 1;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-30659 Server crash on EXPLAIN SELECT/SELECT on table with
+--echo # engine Aria for LooseScan Strategy
+--echo #
+
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+ c2 integer, c3 integer) engine=aria;
+insert into t1(c1,c2,c3)
+ values (1,1,1), (1,2,2), (1,3,3),
+ (2,1,4), (2,2,5), (2,3,6),
+ (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and
+c2 >= 3 order by c2;
+drop table t1;
+
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+ c2 integer, c3 integer) engine=aria;
+
+delimiter /;
+create trigger trg_t1 before update on t1 for each row
+begin
+ set new.old_c1=old.c1;
+ set new.old_c2=old.c2;
+end;
+/
+delimiter ;/
+
+insert into t1 (c1,c2,c3) values
+ (1,1,1), (1,2,2), (1,3,3), (2,1,4), (2,2,5), (2,3,6), (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+analyze table t1 persistent for all;
+create table t2 as select * from t1;
+truncate table t1;
+insert into t1 select * from t2;
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and c2 >= 3 order by c2;
+drop trigger trg_t1;
+drop table t1,t2;
+
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+ c2 integer, c3 integer) engine=aria;
+insert into t1 (c1,c2,c3) values
+ (1,1,1), (1,2,2), (1,3,3), (2,1,4), (2,2,5), (2,3,6), (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+create table t2 as select * from t1;
+truncate table t1;
+insert into t1 select * from t2;
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and c2 >= 3 order by c2;
+drop table t1,t2;
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index 76093f16cd7..9d6ac0b61ba 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -57,7 +57,7 @@ part, supplier, partsupp, nation, region
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
-and p_size = 9
+and (p_size = 9 or p_size =19999)
and p_type like '%TIN'
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
@@ -77,18 +77,18 @@ and r_name = 'ASIA'
order by
s_acctbal desc, n_name, s_name, p_partkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 0.31 Using where; Using temporary; Using filesort
-1 PRIMARY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where; Using temporary; Using filesort
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 0.63 Using where; Using join buffer (flat, BNL join)
1 PRIMARY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
1 PRIMARY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
1 PRIMARY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
-2 DEPENDENT SUBQUERY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where
2 DEPENDENT SUBQUERY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00
2 DEPENDENT SUBQUERY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
2 DEPENDENT SUBQUERY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
+2 DEPENDENT SUBQUERY region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 20.00 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.part.p_partkey' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`part`.`p_size` = 9 and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
+Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and (`dbt3_s001`.`part`.`p_size` = 9 or `dbt3_s001`.`part`.`p_size` = 19999) and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`region`.`r_regionkey` = `dbt3_s001`.`nation`.`n_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
set optimizer_use_condition_selectivity=4;
explain extended
select
@@ -123,13 +123,13 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
1 PRIMARY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
1 PRIMARY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
-2 DEPENDENT SUBQUERY region ALL PRIMARY NULL NULL NULL 5 20.00 Using where
2 DEPENDENT SUBQUERY partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00
2 DEPENDENT SUBQUERY supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.partsupp.ps_suppkey 1 100.00 Using where
2 DEPENDENT SUBQUERY nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00 Using where
+2 DEPENDENT SUBQUERY region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 20.00 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.part.p_partkey' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`part`.`p_size` = 9 and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
+Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_acctbal` AS `s_acctbal`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`nation`.`n_name` AS `n_name`,`dbt3_s001`.`part`.`p_partkey` AS `p_partkey`,`dbt3_s001`.`part`.`p_mfgr` AS `p_mfgr`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`dbt3_s001`.`supplier`.`s_comment` AS `s_comment` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`part`.`p_size` = 9 and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`nation`.`n_regionkey` = `dbt3_s001`.`region`.`r_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_type` like '%TIN' and `dbt3_s001`.`partsupp`.`ps_supplycost` = <expr_cache><`dbt3_s001`.`part`.`p_partkey`>((/* select#2 */ select min(`dbt3_s001`.`partsupp`.`ps_supplycost`) from `dbt3_s001`.`partsupp` join `dbt3_s001`.`supplier` join `dbt3_s001`.`nation` join `dbt3_s001`.`region` where `dbt3_s001`.`supplier`.`s_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`region`.`r_regionkey` = `dbt3_s001`.`nation`.`n_regionkey` and `dbt3_s001`.`region`.`r_name` = 'ASIA' and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey`)) order by `dbt3_s001`.`supplier`.`s_acctbal` desc,`dbt3_s001`.`nation`.`n_name`,`dbt3_s001`.`supplier`.`s_name`,`dbt3_s001`.`part`.`p_partkey`
=== Q15 ===
create view revenue0 (supplier_no, total_revenue) as
select l_suppkey, sum(l_extendedprice * (1 - l_discount))
@@ -171,7 +171,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier index PRIMARY PRIMARY 4 NULL 10 100.00
1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort
-2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 228 100.00
+2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 229 100.00
4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_suppkey` AS `s_suppkey`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`revenue0`.`total_revenue` AS `total_revenue` from `dbt3_s001`.`supplier` join `dbt3_s001`.`revenue0` where `revenue0`.`supplier_no` = `dbt3_s001`.`supplier`.`s_suppkey` and `revenue0`.`total_revenue` = (/* select#2 */ select max(`revenue0`.`total_revenue`) from `dbt3_s001`.`revenue0`) order by `dbt3_s001`.`supplier`.`s_suppkey`
@@ -334,7 +334,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 dbt3_s001.orders.o_orderkey 1 100.00
1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey_quantity 4 dbt3_s001.orders.o_orderkey 4 100.00 Using index
-2 MATERIALIZED lineitem index NULL PRIMARY 8 NULL 6005 100.00
+2 MATERIALIZED lineitem index NULL i_l_orderkey_quantity 13 NULL 6005 100.00 Using index
Warnings:
Note 1003 /* select#1 */ select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (/* select#2 */ select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where `dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey` and `<subquery2>`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` and `dbt3_s001`.`lineitem`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE`
select
@@ -368,7 +368,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 dbt3_s001.orders.o_orderkey 1 100.00
1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey_quantity 4 dbt3_s001.orders.o_orderkey 4 100.00 Using index
-2 MATERIALIZED lineitem index NULL PRIMARY 8 NULL 6005 100.00
+2 MATERIALIZED lineitem index NULL i_l_orderkey_quantity 13 NULL 6005 100.00 Using index
Warnings:
Note 1003 /* select#1 */ select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (/* select#2 */ select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where `dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey` and `<subquery2>`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` and `dbt3_s001`.`lineitem`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE`
select
@@ -495,7 +495,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 100.00 Using where
2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
-4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 100.00 Using where
+4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2
@@ -546,14 +546,13 @@ limit 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 4.17 Using where
-2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 4.17 Using where
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 11.99 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2
-Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
+Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
select sql_calc_found_rows
s_name, s_address
from supplier, nation
@@ -602,14 +601,13 @@ limit 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 7.03 Using where
-2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 7.03 Using where
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 7.11 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2
-Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
+Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
select sql_calc_found_rows
s_name, s_address
from supplier, nation
@@ -658,14 +656,13 @@ limit 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 7.81 Using where
-2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 7.81 Using where
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 6.40 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2
-Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
+Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
select sql_calc_found_rows
s_name, s_address
from supplier, nation
@@ -714,14 +711,13 @@ limit 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY nation ALL PRIMARY NULL NULL NULL 25 4.00 Using where; Using temporary; Using filesort
1 PRIMARY supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 7.81 Using where
-2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where
+1 PRIMARY part ALL PRIMARY NULL NULL NULL 200 7.81 Using where
+1 PRIMARY partsupp eq_ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 8 dbt3_s001.part.p_partkey,dbt3_s001.supplier.s_suppkey 1 6.40 Using where; FirstMatch(supplier)
4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where
Warnings:
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2
Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2
-Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
+Note 1003 /* select#1 */ select sql_calc_found_rows `dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address` from `dbt3_s001`.`supplier` semi join (`dbt3_s001`.`part` join `dbt3_s001`.`partsupp`) join `dbt3_s001`.`nation` where `dbt3_s001`.`supplier`.`s_nationkey` = `dbt3_s001`.`nation`.`n_nationkey` and `dbt3_s001`.`nation`.`n_name` = 'UNITED STATES' and `dbt3_s001`.`partsupp`.`ps_partkey` = `dbt3_s001`.`part`.`p_partkey` and `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`partsupp`.`ps_availqty` > <expr_cache><`dbt3_s001`.`partsupp`.`ps_partkey`,`dbt3_s001`.`partsupp`.`ps_suppkey`>((/* select#4 */ select 0.5 * sum(`dbt3_s001`.`lineitem`.`l_quantity`) from `dbt3_s001`.`lineitem` where `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`partsupp`.`ps_suppkey` and `dbt3_s001`.`lineitem`.`l_shipDATE` >= <cache>(cast('1993-01-01' as date)) and `dbt3_s001`.`lineitem`.`l_shipDATE` < <cache>(cast('1993-01-01' as date) + interval '1' year))) and `dbt3_s001`.`part`.`p_name` like 'g%' order by `dbt3_s001`.`supplier`.`s_name` limit 10
select sql_calc_found_rows
s_name, s_address
from supplier, nation
@@ -808,10 +804,9 @@ explain extended
select * from t1 where a in ( select b from t2 ) AND ( a > 3 );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 1 100.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t1`.`a` > 3
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`a` and `test`.`t1`.`a` > 3
select * from t1 where a in ( select b from t2 ) AND ( a > 3 );
a
drop table t1,t2;
@@ -946,7 +941,7 @@ set optimizer_switch='index_condition_pushdown=off';
EXPLAIN EXTENDED
SELECT * FROM t1, t2 WHERE a > 9;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range a a 5 NULL 1 0.00 Using where
+1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` > 9
@@ -1653,27 +1648,51 @@ drop function f1;
#
create table t1 (a int, b int, key (b), key (a));
insert into t1
-select (rand(1)*1000)/10, (rand(1001)*1000)/20 from seq_1_to_1000;
-analyze table t1 persistent for all;
+select (rand(1)*1000)/30, (rand(1001)*1000)/40 from seq_1_to_1000;
+analyze table t1 ;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+select count(*) from t1 where b=2;
+count(*)
+42
+select count(*) from t1 where a in (17,51,5);
+count(*)
+62
# Check what info the optimizer has about selectivities
explain extended select * from t1 use index () where a in (17,51,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 2.90 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 6.20 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`a` in (17,51,5)
explain extended select * from t1 use index () where b=2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 2.40 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 4.20 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` USE INDEX () where `test`.`t1`.`b` = 2
# Now, the equality is used for ref access, while the range condition
# gives selectivity data
explain extended select * from t1 where a in (17,51,5) and b=2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ref b,a b 5 const 24 2.90 Using where
+1 SIMPLE t1 ref|filter b,a b|a 5|5 const 42 (6%) 6.30 Using where; Using rowid filter
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (17,51,5)
+truncate table t1;
+insert into t1
+select (rand(1)*1000)/10, (rand(1001)*1000)/50 from seq_1_to_1000;
+analyze table t1 ;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+select count(*) from t1 where b=2;
+count(*)
+59
+select count(*) from t1 where a in (17,51,5);
+count(*)
+29
+explain extended select * from t1 where a in (17,51,5) and b=2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range|filter b,a a|b 5|5 NULL 29 (6%) 5.90 Using index condition; Using where; Using rowid filter
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` = 2 and `test`.`t1`.`a` in (17,51,5)
drop table t1;
@@ -1781,7 +1800,7 @@ explain extended select t1.b,t2.a,t3.a,t3.b from t1,t2,t3
where t1.c = t2.a AND t1.d = t3.a and t1.a = 50 and t1.b <= 100;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 10 NULL 11 100.00 Using index condition; Using where
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00 Using index
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.d 1 100.00
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a` = `test`.`t1`.`c` and `test`.`t3`.`a` = `test`.`t1`.`d` and `test`.`t1`.`a` = 50 and `test`.`t1`.`b` <= 100
@@ -1802,8 +1821,8 @@ set optimizer_use_condition_selectivity=2;
explain extended select t1.b,t2.a,t3.a,t3.b from t1,t2,t3
where t1.c = t2.a AND t1.d = t3.a and t1.a = 50 and t1.b <= 100;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range a a 10 NULL 11 11.00 Using index condition; Using where
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00 Using index
+1 SIMPLE t1 range a a 10 NULL 11 100.00 Using index condition; Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.d 1 100.00
Warnings:
Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a` = `test`.`t1`.`c` and `test`.`t3`.`a` = `test`.`t1`.`d` and `test`.`t1`.`a` = 50 and `test`.`t1`.`b` <= 100
@@ -1820,6 +1839,18 @@ b a a b
7 7 8 8
8 8 9 9
9 9 10 10
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+explain extended select t1.b,t2.a,t3.a,t3.b from t1,t2,t3
+where t1.c = t2.a AND t1.d = t3.a and t1.a = 50 and t1.b <= 100;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 range a a 10 NULL 11 100.00 Using index condition; Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.c 1 100.00
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t1.d 1 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`b` AS `b`,`test`.`t2`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a` = `test`.`t1`.`c` and `test`.`t3`.`a` = `test`.`t1`.`d` and `test`.`t1`.`a` = 50 and `test`.`t1`.`b` <= 100
set optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
drop table t1,t2,t3;
#
@@ -1959,6 +1990,88 @@ set use_stat_tables= @save_use_stat_tables;
DROP TABLE t1;
# End of 10.2 tests
set @@global.histogram_size=@save_histogram_size;
+#
+# MDEV-20595
+# Assertion `0 < sel && sel <= 2.0' failed in table_cond_selectivity
+#
+create table t1 (id int, a int, PRIMARY KEY(id), key(a));
+insert into t1 select seq,seq from seq_1_to_100;
+create table t2 (id int, a int, b int, PRIMARY KEY(id), key(a), key(b));
+insert into t2 select seq,seq,seq from seq_1_to_100;
+set optimizer_use_condition_selectivity=2;
+EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1
+1 SIMPLE B ref a a 5 const 1 Using index
+set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
+drop table t1,t2;
+#
+# MDEV-30360 Assertion `cond_selectivity <= 1.000000001' failed in get_range_limit_read_cost
+# with LIMIT .. OFFSET
+#
+CREATE TABLE t1 (a INT, b VARCHAR(1), KEY(b), KEY(a)) engine=myisam;
+INSERT INTO t1 VALUES
+(3,'a'),(2,'g'),(5,'v'),(9,'n'),(6,'u'),
+(7,'s'),(0,'z'),(3,'z'),(NULL,'m'),(6,'r');
+CREATE TABLE t2 (pk INT PRIMARY KEY);
+INSERT INTO t2 VALUES (1),(2);
+SELECT STRAIGHT_JOIN pk FROM t1 JOIN t2 ON a = pk WHERE b >= 'A' ORDER BY t2.pk LIMIT 8 OFFSET 1;
+pk
+DROP TABLE t1, t2;
+#
+# MDEV-30659 Server crash on EXPLAIN SELECT/SELECT on table with
+# engine Aria for LooseScan Strategy
+#
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+c2 integer, c3 integer) engine=aria;
+insert into t1(c1,c2,c3)
+values (1,1,1), (1,2,2), (1,3,3),
+(2,1,4), (2,2,5), (2,3,6),
+(2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and
+c2 >= 3 order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a range t1_c2 t1_c2 5 NULL 5 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 1
+drop table t1;
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+c2 integer, c3 integer) engine=aria;
+create trigger trg_t1 before update on t1 for each row
+begin
+set new.old_c1=old.c1;
+set new.old_c2=old.c2;
+end;
+/
+insert into t1 (c1,c2,c3) values
+(1,1,1), (1,2,2), (1,3,3), (2,1,4), (2,2,5), (2,3,6), (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+analyze table t1 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+create table t2 as select * from t1;
+truncate table t1;
+insert into t1 select * from t2;
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and c2 >= 3 order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a range t1_c2 t1_c2 5 NULL 5 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 1
+drop trigger trg_t1;
+drop table t1,t2;
+create table t1 (old_c1 integer, old_c2 integer, c1 integer,
+c2 integer, c3 integer) engine=aria;
+insert into t1 (c1,c2,c3) values
+(1,1,1), (1,2,2), (1,3,3), (2,1,4), (2,2,5), (2,3,6), (2,4,7), (2,5,8);
+create index t1_c2 on t1 (c2,c1);
+create table t2 as select * from t1;
+truncate table t1;
+insert into t1 select * from t2;
+explain select * from t1 where t1.c2 in (select a.c2 from t1 a) and c2 >= 3 order by c2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a range t1_c2 t1_c2 5 NULL 5 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 2
+drop table t1,t2;
set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
set @tmp_ust= @@use_stat_tables;
set @tmp_oucs= @@optimizer_use_condition_selectivity;
@@ -2162,3 +2275,61 @@ set optimizer_use_condition_selectivity= @tmp_oucs;
set @@global.histogram_size=@save_histogram_size;
SET SESSION DEFAULT_STORAGE_ENGINE=DEFAULT;
SET GLOBAL innodb_stats_persistent=@save_stats_persistent;
+#
+# MDEV-30313 Sporadic assertion `cond_selectivity <= 1.0' failure in get_range_limit_read_cost
+#
+CREATE TABLE t (a CHAR(8), b INT, c TIMESTAMP, KEY(b,c)) ENGINE=InnoDB;
+INSERT INTO t VALUES
+('g',1,'1980-09-26'),('l',2,'1979-10-07'),('e',3,'1992-04-22'),
+('v',9,'1975-09-21'),('w',3,'1973-10-06'),('y',8,'1986-10-28'),
+('a',4,'2015-02-15'),('v',9,'1980-01-13'),('f',1,'1972-02-27'),
+('z',7,'1981-05-25'),('z',8,'1980-06-14'),('c',9,'1985-01-24'),
+('x',5,'1999-12-14'),('h',3,'1994-12-18'),('j',6,'1985-08-17'),
+('b',6,'1989-08-02'),('h',6,'2024-07-06'),('h',4,'2024-02-10'),
+('s',1,'1981-07-21'),('c',2,'1988-09-16'),('e',3,'1981-08-26'),
+('a',2,'1986-05-23'),('l',0,'1997-12-19'),('b',5,'2018-05-01'),
+('q',2,'1990-01-01'),('v',9,'1982-10-12'),('x',2,'2005-04-29'),
+('f',8,'2005-08-20'),('d',3,'2002-01-24'),('b',9,'1982-02-04'),
+('a',4,'1978-04-12'),('c',9,'1984-06-08'),('n',9,'1983-10-19'),
+('l',1,'2023-01-05'),('f',2,'1988-11-18'),('a',9,'1977-11-11'),
+('k',2,'1980-09-27'),('i',7,'1988-08-09'),('e',4,'1992-07-30'),
+('l',5,'1980-01-01'),('h',5,'2011-12-24'),('d',6,'2035-03-28'),
+('h',7,'1994-05-14'),('y',1,'1990-01-01'),('x',6,'1981-09-12'),
+('x',9,'1980-01-01'),('s',9,'1995-11-09'),('i',4,'1980-01-01'),
+('p',4,'1980-01-01'),('a',6,'2026-05-05'),('c',6,'1991-09-23'),
+('l',8,'1980-01-01'),('n',4,'1999-09-15'),('b',1,'2011-07-23'),
+('a',9,'1980-01-01'),('a',0,'1977-12-21'),('v',6,'1986-10-29'),
+('r',0,'1997-03-27'),('a',9,'2000-05-05'),('x',1,'1990-01-01'),
+('n',7,'1985-08-01'),('m',6,'1994-09-14'),('s',9,'2009-09-27'),
+('r',8,'2028-10-30'),('e',6,'1982-08-31'),('x',0,'1989-12-21'),
+('d',0,'1984-06-24'),('r',6,'1982-02-11'),('a',3,'1997-10-22'),
+('s',9,'2007-08-29'),('a',3,'1990-01-01'),('o',1,'2015-02-10'),
+('x',0,'1978-08-30'),('k',5,'1989-06-15'),('b',0,'1984-08-21'),
+('v',0,'1990-01-01'),('a',9,'1993-06-23'),('n',5,'1979-11-10'),
+('o',8,'2024-08-31'),('k',6,'1983-12-25'),('y',5,'2013-02-19'),
+('a',9,'1989-12-03'),('k',4,'1973-08-07'),('o',7,'1988-03-19'),
+('o',3,'2007-01-07'),('t',6,'1990-02-22'),('f',4,'2032-10-22'),
+('p',0,'1977-09-12'),('f',3,'2036-11-26'),('a',9,'2008-06-26'),
+('k',2,'2004-09-11'),('x',1,'2005-07-28'),('s',8,'2027-08-28'),
+('a',8,'2000-06-11'),('a',7,'2005-05-20'),('u',9,'1980-01-01'),
+('v',5,'1990-01-01'),('x',7,'1984-11-01'),('a',1,'2006-05-14');
+SELECT b FROM t WHERE a > 'a' GROUP BY b HAVING b >= 6 OR b <= 0;
+b
+0
+6
+7
+8
+9
+DROP TABLE t;
+#
+# MDEV-30693: Assertion `dbl_records <= s->records' failed in apply_selectivity_for_table on SELECT
+#
+set @tmp_oucs= @@optimizer_use_condition_selectivity;
+CREATE TABLE t1 (c INT KEY) ENGINE=InnoDB;
+SELECT * FROM (SELECT * FROM t1) a JOIN (SELECT * FROM (SELECT * FROM t1 GROUP BY c) d WHERE c>1) b ON a.c=b.c;
+c c
+DROP TABLE t1;
+SET optimizer_use_condition_selectivity=1;
+#
+# End of 11.0 tests
+#
diff --git a/mysql-test/main/selectivity_innodb.test b/mysql-test/main/selectivity_innodb.test
index c970e7d871d..efdb3c1853b 100644
--- a/mysql-test/main/selectivity_innodb.test
+++ b/mysql-test/main/selectivity_innodb.test
@@ -236,3 +236,61 @@ set optimizer_use_condition_selectivity= @tmp_oucs;
set @@global.histogram_size=@save_histogram_size;
SET SESSION DEFAULT_STORAGE_ENGINE=DEFAULT;
SET GLOBAL innodb_stats_persistent=@save_stats_persistent;
+
+--echo #
+--echo # MDEV-30313 Sporadic assertion `cond_selectivity <= 1.0' failure in get_range_limit_read_cost
+--echo #
+
+CREATE TABLE t (a CHAR(8), b INT, c TIMESTAMP, KEY(b,c)) ENGINE=InnoDB;
+INSERT INTO t VALUES
+('g',1,'1980-09-26'),('l',2,'1979-10-07'),('e',3,'1992-04-22'),
+('v',9,'1975-09-21'),('w',3,'1973-10-06'),('y',8,'1986-10-28'),
+('a',4,'2015-02-15'),('v',9,'1980-01-13'),('f',1,'1972-02-27'),
+('z',7,'1981-05-25'),('z',8,'1980-06-14'),('c',9,'1985-01-24'),
+('x',5,'1999-12-14'),('h',3,'1994-12-18'),('j',6,'1985-08-17'),
+('b',6,'1989-08-02'),('h',6,'2024-07-06'),('h',4,'2024-02-10'),
+('s',1,'1981-07-21'),('c',2,'1988-09-16'),('e',3,'1981-08-26'),
+('a',2,'1986-05-23'),('l',0,'1997-12-19'),('b',5,'2018-05-01'),
+('q',2,'1990-01-01'),('v',9,'1982-10-12'),('x',2,'2005-04-29'),
+('f',8,'2005-08-20'),('d',3,'2002-01-24'),('b',9,'1982-02-04'),
+('a',4,'1978-04-12'),('c',9,'1984-06-08'),('n',9,'1983-10-19'),
+('l',1,'2023-01-05'),('f',2,'1988-11-18'),('a',9,'1977-11-11'),
+('k',2,'1980-09-27'),('i',7,'1988-08-09'),('e',4,'1992-07-30'),
+('l',5,'1980-01-01'),('h',5,'2011-12-24'),('d',6,'2035-03-28'),
+('h',7,'1994-05-14'),('y',1,'1990-01-01'),('x',6,'1981-09-12'),
+('x',9,'1980-01-01'),('s',9,'1995-11-09'),('i',4,'1980-01-01'),
+('p',4,'1980-01-01'),('a',6,'2026-05-05'),('c',6,'1991-09-23'),
+('l',8,'1980-01-01'),('n',4,'1999-09-15'),('b',1,'2011-07-23'),
+('a',9,'1980-01-01'),('a',0,'1977-12-21'),('v',6,'1986-10-29'),
+('r',0,'1997-03-27'),('a',9,'2000-05-05'),('x',1,'1990-01-01'),
+('n',7,'1985-08-01'),('m',6,'1994-09-14'),('s',9,'2009-09-27'),
+('r',8,'2028-10-30'),('e',6,'1982-08-31'),('x',0,'1989-12-21'),
+('d',0,'1984-06-24'),('r',6,'1982-02-11'),('a',3,'1997-10-22'),
+('s',9,'2007-08-29'),('a',3,'1990-01-01'),('o',1,'2015-02-10'),
+('x',0,'1978-08-30'),('k',5,'1989-06-15'),('b',0,'1984-08-21'),
+('v',0,'1990-01-01'),('a',9,'1993-06-23'),('n',5,'1979-11-10'),
+('o',8,'2024-08-31'),('k',6,'1983-12-25'),('y',5,'2013-02-19'),
+('a',9,'1989-12-03'),('k',4,'1973-08-07'),('o',7,'1988-03-19'),
+('o',3,'2007-01-07'),('t',6,'1990-02-22'),('f',4,'2032-10-22'),
+('p',0,'1977-09-12'),('f',3,'2036-11-26'),('a',9,'2008-06-26'),
+('k',2,'2004-09-11'),('x',1,'2005-07-28'),('s',8,'2027-08-28'),
+('a',8,'2000-06-11'),('a',7,'2005-05-20'),('u',9,'1980-01-01'),
+('v',5,'1990-01-01'),('x',7,'1984-11-01'),('a',1,'2006-05-14');
+
+SELECT b FROM t WHERE a > 'a' GROUP BY b HAVING b >= 6 OR b <= 0;
+
+# Cleanup
+DROP TABLE t;
+
+--echo #
+--echo # MDEV-30693: Assertion `dbl_records <= s->records' failed in apply_selectivity_for_table on SELECT
+--echo #
+set @tmp_oucs= @@optimizer_use_condition_selectivity;
+CREATE TABLE t1 (c INT KEY) ENGINE=InnoDB;
+SELECT * FROM (SELECT * FROM t1) a JOIN (SELECT * FROM (SELECT * FROM t1 GROUP BY c) d WHERE c>1) b ON a.c=b.c;
+DROP TABLE t1;
+SET optimizer_use_condition_selectivity=1;
+
+--echo #
+--echo # End of 11.0 tests
+--echo #
diff --git a/mysql-test/main/selectivity_no_engine.result b/mysql-test/main/selectivity_no_engine.result
index 3811b12a1be..5df1c61e758 100644
--- a/mysql-test/main/selectivity_no_engine.result
+++ b/mysql-test/main/selectivity_no_engine.result
@@ -314,6 +314,25 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE a ALL NULL NULL NULL NULL 5 Using where
1 SIMPLE b hash_ALL NULL #hash#$hj 1341 test.a.Host,test.a.User,test.a.Password,test.a.Select_priv,test.a.Insert_priv,test.a.Update_priv,test.a.Delete_priv,test.a.Create_priv,test.a.Drop_priv,test.a.Reload_priv,test.a.Shutdown_priv,test.a.Process_priv,test.a.File_priv,test.a.Grant_priv,test.a.References_priv,test.a.Index_priv,test.a.Alter_priv,test.a.Show_db_priv,test.a.Super_priv,test.a.Create_tmp_table_priv,test.a.Lock_tables_priv,test.a.Execute_priv,test.a.Repl_slave_priv,test.a.Repl_client_priv,test.a.Create_view_priv,test.a.Show_view_priv,test.a.Create_routine_priv,test.a.Alter_routine_priv,test.a.Create_user_priv,test.a.Event_priv,test.a.Trigger_priv,test.a.Create_tablespace_priv,test.a.Delete_history_priv,test.a.ssl_type,test.a.ssl_cipher,test.a.x509_issuer,test.a.x509_subject,test.a.max_questions,test.a.max_updates,test.a.max_connections,test.a.max_user_connections,test.a.plugin,test.a.authentication_string,test.a.password_expired,test.a.is_role,test.a.default_role,test.a.max_statement_time 5 Using where; Using join buffer (flat, BNLH join)
DROP TABLE t1,t2,t3;
+#
+# MDEV-30529: Assertion `rnd_records <= s->found_records' failed in best_access_path
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b VARCHAR(1), c INT, d VARCHAR(1), e VARCHAR(1), KEY(b), KEY(d), KEY(e)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('x',0,'-','-'),
+('x',0,'-','-'),('x',5,'-','-'),('x',0,'-','-'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'w','-'),('x',0,'-','-'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','u'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'-','t'),('x',0,'-','-'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','p'),
+('x',0,'z','-'),('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','v');
+set @tmp_jcl=@@join_cache_level;
+SET JOIN_CACHE_LEVEL= 3;
+SELECT * FROM t1 JOIN t2 ON t1.a = t2.c WHERE t2.b IN ('o') OR t2.e >= 'f' OR t2.d > 'p';
+a b c d e
+set join_cache_level=@tmp_jcl;
+drop table t1,t2;
#
# End of the test file
#
diff --git a/mysql-test/main/selectivity_no_engine.test b/mysql-test/main/selectivity_no_engine.test
index 5bc78e03781..8596fce9bf2 100644
--- a/mysql-test/main/selectivity_no_engine.test
+++ b/mysql-test/main/selectivity_no_engine.test
@@ -250,6 +250,27 @@ SELECT * FROM t1 AS a NATURAL JOIN t1 AS b;
DROP TABLE t1,t2,t3;
+--echo #
+--echo # MDEV-30529: Assertion `rnd_records <= s->found_records' failed in best_access_path
+--echo #
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (b VARCHAR(1), c INT, d VARCHAR(1), e VARCHAR(1), KEY(b), KEY(d), KEY(e)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('x',0,'-','-'),
+('x',0,'-','-'),('x',5,'-','-'),('x',0,'-','-'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'w','-'),('x',0,'-','-'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','u'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'-','t'),('x',0,'-','-'),('x',0,'-','-'),
+('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','p'),
+('x',0,'z','-'),('x',0,'-','-'),('x',0,'-','-'),('x',0,'-','v');
+
+set @tmp_jcl=@@join_cache_level;
+SET JOIN_CACHE_LEVEL= 3;
+SELECT * FROM t1 JOIN t2 ON t1.a = t2.c WHERE t2.b IN ('o') OR t2.e >= 'f' OR t2.d > 'p';
+set join_cache_level=@tmp_jcl;
+
+drop table t1,t2;
--echo #
--echo # End of the test file
--echo #
diff --git a/mysql-test/main/set_operation.result b/mysql-test/main/set_operation.result
index fa0fe21d156..f01b68357a2 100644
--- a/mysql-test/main/set_operation.result
+++ b/mysql-test/main/set_operation.result
@@ -221,7 +221,7 @@ NULL UNIT RESULT <unit8,9,12> ALL NULL NULL NULL NULL NULL NULL
16 EXCEPT NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNIT RESULT <unit1,15,16> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `__14`.`1` AS `1` from (/* select#8 */ select `__7`.`1` AS `1` from (/* select#2 */ select 1 AS `1` except /* select#3 */ select 5 AS `5` union /* select#4 */ select 6 AS `6`) `__7` union /* select#9 */ select `__8`.`2` AS `2` from (/* select#5 */ select 2 AS `2` intersect /* select#6 */ select 3 AS `3` intersect /* select#7 */ select 4 AS `4`) `__8` except /* select#12 */ select `__11`.`7` AS `7` from (/* select#10 */ select 7 AS `7` intersect /* select#11 */ select 8 AS `8`) `__11`) `__14` union all /* select#15 */ select `__15`.`9` AS `9` from (/* select#13 */ select 9 AS `9` union all /* select#14 */ select 10 AS `10`) `__15` except all /* select#16 */ select 11 AS `11`
+Note 1003 /* select#1 */ select `__14`.`1` AS `1` from (/* select#8 */ select `__7`.`1` AS `1` from (/* select#2 */ select 1 AS `1` except /* select#3 */ select 5 AS `5` union all /* select#4 */ select 6 AS `6`) `__7` union /* select#9 */ select `__8`.`2` AS `2` from (/* select#5 */ select 2 AS `2` intersect /* select#6 */ select 3 AS `3` intersect /* select#7 */ select 4 AS `4`) `__8` except /* select#12 */ select `__11`.`7` AS `7` from (/* select#10 */ select 7 AS `7` intersect /* select#11 */ select 8 AS `8`) `__11`) `__14` union all /* select#15 */ select `__15`.`9` AS `9` from (/* select#13 */ select 9 AS `9` union all /* select#14 */ select 10 AS `10`) `__15` except all /* select#16 */ select 11 AS `11`
(select 1 union all select 2)
union
(select 3 union all select 4);
@@ -242,7 +242,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
5 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union1,6> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `__5`.`1` AS `1` from (/* select#2 */ select 1 AS `1` union /* select#3 */ select 2 AS `2`) `__5` union /* select#6 */ select `__6`.`3` AS `3` from (/* select#4 */ select 3 AS `3` union /* select#5 */ select 4 AS `4`) `__6`
+Note 1003 /* select#1 */ select `__5`.`1` AS `1` from (/* select#2 */ select 1 AS `1` union all /* select#3 */ select 2 AS `2`) `__5` union /* select#6 */ select `__6`.`3` AS `3` from (/* select#4 */ select 3 AS `3` union /* select#5 */ select 4 AS `4`) `__6`
(select 1 intersect all select 2)
except
select 3;
@@ -258,7 +258,7 @@ NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL NULL
4 EXCEPT NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL EXCEPT RESULT <except1,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `__4`.`1` AS `1` from (/* select#2 */ select 1 AS `1` intersect /* select#3 */ select 2 AS `2`) `__4` except /* select#4 */ select 3 AS `3`
+Note 1003 /* select#1 */ select `__4`.`1` AS `1` from (/* select#2 */ select 1 AS `1` intersect all /* select#3 */ select 2 AS `2`) `__4` except /* select#4 */ select 3 AS `3`
(select 1 intersect all select 2 intersect all select 3)
intersect
(select 4 intersect all select 5);
@@ -278,7 +278,7 @@ NULL INTERSECT RESULT <intersect2,3,4> ALL NULL NULL NULL NULL NULL NULL
NULL INTERSECT RESULT <intersect5,6> ALL NULL NULL NULL NULL NULL NULL
NULL INTERSECT RESULT <intersect1,7> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `__6`.`1` AS `1` from (/* select#2 */ select 1 AS `1` intersect /* select#3 */ select 2 AS `2` intersect /* select#4 */ select 3 AS `3`) `__6` intersect /* select#7 */ select `__7`.`4` AS `4` from (/* select#5 */ select 4 AS `4` intersect /* select#6 */ select 5 AS `5`) `__7`
+Note 1003 /* select#1 */ select `__6`.`1` AS `1` from (/* select#2 */ select 1 AS `1` intersect all /* select#3 */ select 2 AS `2` intersect all /* select#4 */ select 3 AS `3`) `__6` intersect /* select#7 */ select `__7`.`4` AS `4` from (/* select#5 */ select 4 AS `4` intersect /* select#6 */ select 5 AS `5`) `__7`
# test set operations with table value constructor
(values (1,1),(1,1),(1,1),(2,2),(2,2),(3,3),(9,9))
INTERSECT ALL
@@ -571,12 +571,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -587,12 +590,15 @@ EXPLAIN
"query_block": {
"select_id": 8,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -603,12 +609,15 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -628,12 +637,15 @@ EXPLAIN
"query_block": {
"select_id": 4,
"operation": "INTERSECT",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "<derived5>",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -642,12 +654,15 @@ EXPLAIN
{
"query_block": {
"select_id": 5,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -658,12 +673,15 @@ EXPLAIN
"query_block": {
"select_id": 6,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -692,12 +710,15 @@ EXPLAIN
"query_block": {
"select_id": 7,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -708,12 +729,15 @@ EXPLAIN
"query_block": {
"select_id": 9,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -724,12 +748,15 @@ EXPLAIN
"query_block": {
"select_id": 10,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
}
@@ -740,12 +767,15 @@ EXPLAIN
"query_block": {
"select_id": 11,
"operation": "EXCEPT",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
},
@@ -754,7 +784,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 5,
"rows": 6,
+ "cost": "REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -769,12 +801,15 @@ EXPLAIN
"query_block": {
"select_id": 12,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 4"
}
@@ -784,7 +819,9 @@ EXPLAIN
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 5,
"rows": 6,
+ "cost": "REPLACED",
"filtered": 100
},
"buffer_type": "flat",
@@ -799,12 +836,15 @@ EXPLAIN
"query_block": {
"select_id": 13,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 5,
+ "cost": "REPLACED",
"filtered": 100
}
}
diff --git a/mysql-test/main/set_operation.test b/mysql-test/main/set_operation.test
index c422042f371..e9f71a7e448 100644
--- a/mysql-test/main/set_operation.test
+++ b/mysql-test/main/set_operation.test
@@ -272,6 +272,7 @@ select * from v0 where g < 4
UNION ALL
select * from t3;
+--source include/analyze-format.inc
EXPLAIN format=json
select * from t1
UNION ALL
diff --git a/mysql-test/main/show_analyze.result b/mysql-test/main/show_analyze.result
index 5595fadd60b..2cc18e40d94 100644
--- a/mysql-test/main/show_analyze.result
+++ b/mysql-test/main/show_analyze.result
@@ -405,6 +405,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"filesort": {
@@ -421,9 +422,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
diff --git a/mysql-test/main/show_analyze_json.result b/mysql-test/main/show_analyze_json.result
index 8506c4b9402..87f2ea9a655 100644
--- a/mysql-test/main/show_analyze_json.result
+++ b/mysql-test/main/show_analyze_json.result
@@ -44,15 +44,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 1000,
"r_rows": 1000,
+ "cost": "REPLACED",
"filtered": 50,
"r_filtered": 50,
"attached_condition": "t1.c < 500"
@@ -77,15 +80,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 1000,
"r_rows": 1000,
+ "cost": "REPLACED",
"filtered": 10,
"r_filtered": 10,
"attached_condition": "t1.c < 10"
@@ -111,6 +117,7 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -119,9 +126,11 @@ SHOW ANALYZE
"table_name": "t1",
"access_type": "ALL",
"possible_keys": ["a"],
+ "loops": 1,
"r_loops": 1,
"rows": 1000,
"r_rows": 1000,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 99.90000153,
@@ -160,15 +169,18 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 60,
"attached_condition": "a.a <= 5"
@@ -181,14 +193,17 @@ SHOW ANALYZE
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "b.a >= 9"
@@ -229,15 +244,18 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 60,
"attached_condition": "a.a <= 5"
@@ -250,15 +268,18 @@ SHOW ANALYZE
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 10,
"attached_condition": "b.a >= 9"
@@ -299,6 +320,7 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -306,9 +328,11 @@ SHOW ANALYZE
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -323,14 +347,17 @@ SHOW ANALYZE
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "b.a >= 9"
@@ -371,6 +398,7 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -378,9 +406,11 @@ SHOW ANALYZE
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -395,6 +425,7 @@ SHOW ANALYZE
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -402,9 +433,11 @@ SHOW ANALYZE
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -441,15 +474,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 20,
"attached_condition": "a.a < 2"
@@ -460,15 +496,18 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 30,
"attached_condition": "b.a > 6"
@@ -501,6 +540,7 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -508,9 +548,11 @@ SHOW ANALYZE
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -523,6 +565,7 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -530,9 +573,11 @@ SHOW ANALYZE
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -567,14 +612,17 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "a.a < 2"
@@ -588,14 +636,17 @@ SHOW ANALYZE
"r_loops": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "b.a + a.a < 10"
@@ -629,15 +680,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 20,
"attached_condition": "a.a < 2"
@@ -651,15 +705,18 @@ SHOW ANALYZE
"r_hit_ratio": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 2,
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 2,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 95,
"attached_condition": "b.a + a.a < 10"
@@ -693,6 +750,7 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -700,9 +758,11 @@ SHOW ANALYZE
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -718,6 +778,7 @@ SHOW ANALYZE
"r_hit_ratio": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 2,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -725,9 +786,11 @@ SHOW ANALYZE
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 2,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -791,14 +854,17 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "t2.a + t0.a < 3"
@@ -834,15 +900,18 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 30,
"attached_condition": "t2.a + t0.a < 3"
@@ -889,14 +958,17 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "t2.a + t0.a < 3"
@@ -932,15 +1004,18 @@ SHOW ANALYZE
{
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 30,
"attached_condition": "t2.a + t0.a < 3"
@@ -974,15 +1049,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 1,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 100
}
@@ -995,14 +1073,17 @@ SHOW ANALYZE
"r_hit_ratio": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 0,
"rows": 10,
"r_rows": null,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": null,
"attached_condition": "t2.a + t0.a < 3"
@@ -1026,15 +1107,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 2,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 100
}
@@ -1047,15 +1131,18 @@ SHOW ANALYZE
"r_hit_ratio": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 30,
"attached_condition": "t2.a + t0.a < 3"
@@ -1079,15 +1166,18 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 100
}
@@ -1100,15 +1190,18 @@ SHOW ANALYZE
"r_hit_ratio": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 2,
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 2,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 25,
"attached_condition": "t2.a + t0.a < 3"
@@ -1146,6 +1239,7 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"nested_loop": [
{
@@ -1161,9 +1255,11 @@ SHOW ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 100
}
@@ -1205,6 +1301,7 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"temporary_table": {
"nested_loop": [
@@ -1212,9 +1309,11 @@ SHOW ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 100
}
@@ -1254,6 +1353,7 @@ SHOW ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"temporary_table": {
"nested_loop": [
@@ -1261,9 +1361,11 @@ SHOW ANALYZE
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"filtered": 100,
"r_filtered": 100
}
diff --git a/mysql-test/main/show_explain.result b/mysql-test/main/show_explain.result
index 6bdc773aa4c..ce335819324 100644
--- a/mysql-test/main/show_explain.result
+++ b/mysql-test/main/show_explain.result
@@ -1264,7 +1264,7 @@ explain
SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 system NULL NULL NULL NULL 1
-1 SIMPLE t1 range b b 6 NULL 107 Using where; Using index
+1 SIMPLE t1 index b b 6 NULL 107 Using where; Using index
1 SIMPLE t3 ref PRIMARY PRIMARY 5 test.t1.b 1 Using index
set @show_explain_probe_select_id=1;
SET debug_dbug='+d,show_explain_probe_do_select';
@@ -1273,7 +1273,7 @@ connection default;
show explain for $thr2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 system NULL NULL NULL NULL 1
-1 SIMPLE t1 range b b 6 NULL 107 Using where; Using index
+1 SIMPLE t1 index b b 6 NULL 107 Using where; Using index
1 SIMPLE t3 ref PRIMARY PRIMARY 5 test.t1.b 1 Using index
Warnings:
Note 1003 SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2
diff --git a/mysql-test/main/show_explain_json.result b/mysql-test/main/show_explain_json.result
index a5c441af5b8..4a21528e41d 100644
--- a/mysql-test/main/show_explain_json.result
+++ b/mysql-test/main/show_explain_json.result
@@ -47,6 +47,7 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -56,7 +57,9 @@ SHOW EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 999,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.a < 100000",
"using_index": true
@@ -77,6 +80,7 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -86,7 +90,9 @@ SHOW EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a < 10"
}
@@ -106,6 +112,7 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -115,7 +122,9 @@ SHOW EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a < 10"
}
@@ -138,6 +147,7 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -147,7 +157,9 @@ SHOW EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a < 10",
"mrr_type": "Rowid-ordered scan"
@@ -169,6 +181,7 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -178,7 +191,9 @@ SHOW EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t1.a < 10",
"mrr_type": "Rowid-ordered scan"
@@ -210,12 +225,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "A",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -226,12 +244,15 @@ SHOW EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "B",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -266,12 +287,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "A",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -282,12 +306,15 @@ SHOW EXPLAIN
"query_block": {
"select_id": 2,
"operation": "UNION",
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "B",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -378,12 +405,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "a.a < 1"
}
@@ -394,12 +424,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "b.a + a.a < 10"
}
@@ -427,12 +460,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "a.a < 1"
}
@@ -443,12 +479,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "b.a + a.a < 10"
}
@@ -476,12 +515,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "a",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "a.a < 1"
}
@@ -492,12 +534,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "b",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "b.a + a.a < 10"
}
@@ -552,12 +597,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -586,12 +634,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -628,12 +679,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -662,12 +716,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -707,12 +764,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -741,12 +801,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -786,12 +849,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -820,12 +886,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -854,12 +923,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -869,12 +941,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -893,12 +968,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -908,12 +986,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -932,12 +1013,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -947,12 +1031,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -986,12 +1073,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1001,12 +1091,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -1025,12 +1118,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1040,12 +1136,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -1064,12 +1163,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1079,12 +1181,15 @@ SHOW EXPLAIN
"expression_cache": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t2.a + t0.a < 3"
}
@@ -1120,6 +1225,7 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"read_sorted_file": {
@@ -1128,7 +1234,9 @@ SHOW EXPLAIN
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1169,13 +1277,16 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1215,13 +1326,16 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"temporary_table": {
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1264,12 +1378,15 @@ SHOW EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t0.a = <cache>(octet_length('гы'))"
}
diff --git a/mysql-test/main/show_explain_json.test b/mysql-test/main/show_explain_json.test
index 8d2a6aa82bb..321fa46be87 100644
--- a/mysql-test/main/show_explain_json.test
+++ b/mysql-test/main/show_explain_json.test
@@ -118,6 +118,7 @@ send select count(*) from t1 where a < 100000;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain FORMAT=JSON for $thr2;
connection con1;
reap;
@@ -126,6 +127,7 @@ reap;
send select max(c) from t1 where a < 10;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain FORMAT=JSON for connection $thr2;
connection con1;
reap;
@@ -134,6 +136,7 @@ reap;
send select max(c) from t1 where a < 10;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
connection con1;
reap;
@@ -145,6 +148,7 @@ set optimizer_switch='index_condition_pushdown=on,mrr=on,mrr_sort_keys=on';
send explain select max(c) from t1 where a < 10;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
connection con1;
reap;
@@ -153,6 +157,7 @@ reap;
send explain select max(c) from t1 where a < 10;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
connection con1;
reap;
@@ -165,6 +170,7 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send explain select a from t0 A union select a+1 from t0 B;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format = JSON for $thr2;
connection con1;
reap;
@@ -177,6 +183,7 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send explain select a from t0 A union select a+1 from t0 B;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
connection con1;
reap;
@@ -234,6 +241,7 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
connection con1;
reap;
@@ -245,6 +253,7 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
connection con1;
reap;
@@ -256,6 +265,7 @@ SET debug_dbug='+d,show_explain_probe_join_exec_end';
send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
connection con1;
reap;
@@ -293,8 +303,10 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send update t2 set dummy=0 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
connection con1;
reap;
@@ -308,8 +320,10 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send update t2 set dummy=0 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
connection con1;
reap;
@@ -326,8 +340,10 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send delete from t2 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
connection con1;
reap;
@@ -343,8 +359,10 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send delete from t2 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
connection con1;
reap;
@@ -360,10 +378,13 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
connection con1;
reap;
@@ -380,10 +401,13 @@ SET debug_dbug='+d,show_explain_probe_join_exec_start';
send select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp explain format=JSON for connection $thr2;
connection con1;
reap;
@@ -400,6 +424,7 @@ set @show_explain_probe_select_id=1;
send select * from t0 order by a;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
connection con1;
reap;
@@ -417,6 +442,7 @@ set @show_explain_probe_select_id=1;
send select distinct a from t0;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
connection con1;
reap;
@@ -435,6 +461,7 @@ set @show_explain_probe_select_id=1;
send select distinct a from t0;
connection default;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=json for $thr2;
connection con1;
reap;
@@ -461,6 +488,7 @@ select * from t0 where length('ãû') = a;
connection default;
set names utf8;
--source include/wait_condition.inc
+--source include/explain-no-costs.inc
evalp show explain format=JSON for $thr2;
set names default;
diff --git a/mysql-test/main/signal_demo1.result b/mysql-test/main/signal_demo1.result
index d919f48404f..752f23a48d6 100644
--- a/mysql-test/main/signal_demo1.result
+++ b/mysql-test/main/signal_demo1.result
@@ -75,9 +75,6 @@ end;
end case;
end
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure check_pk_inventory(in id integer)
begin
declare x integer;
@@ -95,8 +92,6 @@ MYSQL_ERRNO = 10000;
end if;
end
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure check_pk_order(in id integer)
begin
declare x integer;
@@ -113,8 +108,6 @@ MYSQL_ERRNO = 10000;
end if;
end
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create trigger po_order_bi before insert on po_order
for each row
begin
diff --git a/mysql-test/main/single_delete_update.result b/mysql-test/main/single_delete_update.result
index 85e79f53c89..6a17895ef8a 100644
--- a/mysql-test/main/single_delete_update.result
+++ b/mysql-test/main/single_delete_update.result
@@ -129,21 +129,21 @@ a b c d
SHOW SESSION STATUS LIKE 'Sort%';
Variable_name Value
Sort_merge_passes 0
-Sort_priority_queue_sorts 1
+Sort_priority_queue_sorts 0
Sort_range 0
-Sort_rows 1
-Sort_scan 1
+Sort_rows 0
+Sort_scan 0
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
-Handler_read_first 0
+Handler_read_first 1
Handler_read_key 0
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 16
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
-Handler_read_rnd_next 17
+Handler_read_rnd_next 0
FLUSH STATUS;
DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
SHOW SESSION STATUS LIKE 'Sort%';
@@ -151,19 +151,19 @@ Variable_name Value
Sort_merge_passes 0
Sort_priority_queue_sorts 0
Sort_range 0
-Sort_rows 1
-Sort_scan 1
+Sort_rows 0
+Sort_scan 0
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
-Handler_read_first 0
+Handler_read_first 1
Handler_read_key 0
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 16
Handler_read_prev 0
Handler_read_retry 0
-Handler_read_rnd 1
+Handler_read_rnd 0
Handler_read_rnd_deleted 0
-Handler_read_rnd_next 17
+Handler_read_rnd_next 0
## should be 5 (previous LIMIT)
SELECT 1 - COUNT(*) FROM t2 WHERE b = 10;
1 - COUNT(*)
@@ -332,6 +332,7 @@ DROP TABLE t2;
#
CREATE TABLE t2 (i INT, key1 INT, key2 INT, INDEX (key1), INDEX (key2));
INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
+INSERT INTO t2 (key1, key2) SELECT i+100, i+100 FROM t1;
FLUSH STATUS;
SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
i key1 key2
@@ -734,41 +735,41 @@ a b c d
SHOW SESSION STATUS LIKE 'Sort%';
Variable_name Value
Sort_merge_passes 0
-Sort_priority_queue_sorts 1
+Sort_priority_queue_sorts 0
Sort_range 0
-Sort_rows 1
-Sort_scan 1
+Sort_rows 0
+Sort_scan 0
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
-Handler_read_first 0
+Handler_read_first 1
Handler_read_key 0
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 16
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
-Handler_read_rnd_next 17
+Handler_read_rnd_next 0
FLUSH STATUS;
UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
SHOW SESSION STATUS LIKE 'Sort%';
Variable_name Value
Sort_merge_passes 0
-Sort_priority_queue_sorts 1
+Sort_priority_queue_sorts 0
Sort_range 0
-Sort_rows 1
-Sort_scan 1
+Sort_rows 0
+Sort_scan 0
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
-Handler_read_first 0
+Handler_read_first 1
Handler_read_key 0
Handler_read_last 0
-Handler_read_next 0
+Handler_read_next 16
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 1
Handler_read_rnd_deleted 0
-Handler_read_rnd_next 17
+Handler_read_rnd_next 0
## should be 5 (previous LIMIT)
SELECT COUNT(*) FROM t2 WHERE b = 10 AND d = 10 ORDER BY a, c;
COUNT(*)
@@ -939,6 +940,7 @@ DROP TABLE t2;
#
CREATE TABLE t2 (i INT, key1 INT, key2 INT, INDEX (key1), INDEX (key2));
INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
+INSERT INTO t2 (key1, key2) SELECT i+100, i+100 FROM t1;
FLUSH STATUS;
SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
i key1 key2
diff --git a/mysql-test/main/single_delete_update.test b/mysql-test/main/single_delete_update.test
index 4a4ad5e5a8e..05cff5e1413 100644
--- a/mysql-test/main/single_delete_update.test
+++ b/mysql-test/main/single_delete_update.test
@@ -147,6 +147,7 @@ DROP TABLE t2;
CREATE TABLE t2 (i INT, key1 INT, key2 INT, INDEX (key1), INDEX (key2));
INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
+INSERT INTO t2 (key1, key2) SELECT i+100, i+100 FROM t1;
FLUSH STATUS;
SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
@@ -372,6 +373,7 @@ DROP TABLE t2;
CREATE TABLE t2 (i INT, key1 INT, key2 INT, INDEX (key1), INDEX (key2));
INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
+INSERT INTO t2 (key1, key2) SELECT i+100, i+100 FROM t1;
FLUSH STATUS;
SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
diff --git a/mysql-test/main/skr.result b/mysql-test/main/skr.result
new file mode 100644
index 00000000000..291377573bc
--- /dev/null
+++ b/mysql-test/main/skr.result
@@ -0,0 +1,54 @@
+#
+# MDEV-23406: query with mutually recursive CTEs when big_tables=1
+#
+set @save_big_tables=@@big_tables;
+set big_tables=1;
+Warnings:
+Warning 1287 '@@big_tables' is deprecated and will be removed in a future release
+create table folks(id int, name char(32), dob date, father int, mother int);
+insert into folks values
+(100, 'Me', '2000-01-01', 20, 30),
+(20, 'Dad', '1970-02-02', 10, 9),
+(30, 'Mom', '1975-03-03', 8, 7),
+(10, 'Grandpa Bill', '1940-04-05', null, null),
+(9, 'Grandma Ann', '1941-10-15', null, null),
+(25, 'Uncle Jim', '1968-11-18', 8, 7),
+(98, 'Sister Amy', '2001-06-20', 20, 30),
+(7, 'Grandma Sally', '1943-08-23', null, 6),
+(8, 'Grandpa Ben', '1940-10-21', null, null),
+(6, 'Grandgrandma Martha', '1923-05-17', null, null),
+(67, 'Cousin Eddie', '1992-02-28', 25, 27),
+(27, 'Auntie Melinda', '1971-03-29', null, null);
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+union
+select h.*, w.*
+from folks v, folks h, folks w
+where v.name = 'Me' and
+(v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples;
+h_name h_dob w_name w_dob
+Dad 1970-02-02 Mom 1975-03-03
+Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15
+Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23
+drop table folks;
+set big_tables=@save_big_tables;
+Warnings:
+Warning 1287 '@@big_tables' is deprecated and will be removed in a future release
diff --git a/mysql-test/main/skr.test b/mysql-test/main/skr.test
new file mode 100644
index 00000000000..3094faff696
--- /dev/null
+++ b/mysql-test/main/skr.test
@@ -0,0 +1,56 @@
+--source include/default_optimizer_switch.inc
+
+--echo #
+--echo # MDEV-23406: query with mutually recursive CTEs when big_tables=1
+--echo #
+
+set @save_big_tables=@@big_tables;
+set big_tables=1;
+
+create table folks(id int, name char(32), dob date, father int, mother int);
+
+insert into folks values
+(100, 'Me', '2000-01-01', 20, 30),
+(20, 'Dad', '1970-02-02', 10, 9),
+(30, 'Mom', '1975-03-03', 8, 7),
+(10, 'Grandpa Bill', '1940-04-05', null, null),
+(9, 'Grandma Ann', '1941-10-15', null, null),
+(25, 'Uncle Jim', '1968-11-18', 8, 7),
+(98, 'Sister Amy', '2001-06-20', 20, 30),
+(7, 'Grandma Sally', '1943-08-23', null, 6),
+(8, 'Grandpa Ben', '1940-10-21', null, null),
+(6, 'Grandgrandma Martha', '1923-05-17', null, null),
+(67, 'Cousin Eddie', '1992-02-28', 25, 27),
+(27, 'Auntie Melinda', '1971-03-29', null, null);
+
+let q=
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+ w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+ select h.*, w.*
+ from folks h, folks w, coupled_ancestors a
+ where a.father = h.id AND a.mother = w.id
+ union
+ select h.*, w.*
+ from folks v, folks h, folks w
+ where v.name = 'Me' and
+ (v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+ select h_id, h_name, h_dob, h_father, h_mother
+ from ancestor_couples
+ union
+ select w_id, w_name, w_dob, w_father, w_mother
+ from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+ from ancestor_couples;
+
+eval $q;
+drop table folks;
+
+set big_tables=@save_big_tables;
diff --git a/mysql-test/main/sp-anchor-row-type-cursor.result b/mysql-test/main/sp-anchor-row-type-cursor.result
index e56c51bb82e..e3c6e1fc167 100644
--- a/mysql-test/main/sp-anchor-row-type-cursor.result
+++ b/mysql-test/main/sp-anchor-row-type-cursor.result
@@ -936,8 +936,6 @@ SELECT rec1.a, rec1.b;
END;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -957,8 +955,6 @@ SELECT rec1.a, rec1.b;
END;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -976,8 +972,6 @@ SELECT rec1.a, rec1.b;
END;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/main/sp-anchor-row-type-table.result b/mysql-test/main/sp-anchor-row-type-table.result
index 00fda5f12ad..c1b45c4e7d1 100644
--- a/mysql-test/main/sp-anchor-row-type-table.result
+++ b/mysql-test/main/sp-anchor-row-type-table.result
@@ -606,8 +606,6 @@ SELECT 10,'a','b' FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -624,8 +622,6 @@ SELECT 10,'a' FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -642,8 +638,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/main/sp-anchor-type.result b/mysql-test/main/sp-anchor-type.result
index 47bbed31e19..31c8ff7469b 100644
--- a/mysql-test/main/sp-anchor-type.result
+++ b/mysql-test/main/sp-anchor-type.result
@@ -957,8 +957,6 @@ SELECT * FROM t1 INTO v_a, v_b, v_c;
SELECT v_a, v_b, v_c;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
v_a v_b v_c
1 b1 2001-01-01 10:20:30.123
diff --git a/mysql-test/main/sp-big.result b/mysql-test/main/sp-big.result
index ea93f2cac60..611ac9b74e9 100644
--- a/mysql-test/main/sp-big.result
+++ b/mysql-test/main/sp-big.result
@@ -77,8 +77,6 @@ select count(*) as cnt from (select id1 from t1 force index (primary) where id1
set id1_cond = id1_cond + 1;
end while;
end//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert t1 select seq, seq, 1, 1, seq, seq, seq from seq_1_to_2000;
set @before=unix_timestamp();
call select_test();
diff --git a/mysql-test/main/sp-error.result b/mysql-test/main/sp-error.result
index c77f58b6a66..f4928d16f55 100644
--- a/mysql-test/main/sp-error.result
+++ b/mysql-test/main/sp-error.result
@@ -1,7 +1,5 @@
drop table if exists t1, t2;
SELECT * FROM mysql.proc INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/proc.txt';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
delete from mysql.proc;
create procedure syntaxerror(t int)|
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
@@ -14,8 +12,6 @@ create table t3 ( x int )|
insert into t3 values (2), (3)|
create procedure bad_into(out param int)
select x from t3 into param|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
call bad_into(@x)|
ERROR 42000: Result consisted of more than one row
drop procedure bad_into|
@@ -2842,8 +2838,6 @@ DECLARE v VARCHAR(5) DEFAULT -1;
SELECT b FROM t1 WHERE a = 2 INTO v;
RETURN v;
END|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Here we check that the NOT_FOUND condition raised in f1()
# is not visible in the outer function (f2), i.e. the continue
diff --git a/mysql-test/main/sp-row.result b/mysql-test/main/sp-row.result
index b66455dfdb9..a47f124f9a7 100644
--- a/mysql-test/main/sp-row.result
+++ b/mysql-test/main/sp-row.result
@@ -2136,8 +2136,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2152,8 +2150,6 @@ SELECT * FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2168,8 +2164,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/main/sp-security.result b/mysql-test/main/sp-security.result
index eb186dd7faf..f25bff8a920 100644
--- a/mysql-test/main/sp-security.result
+++ b/mysql-test/main/sp-security.result
@@ -432,9 +432,9 @@ CREATE FUNCTION wl2897_f1() RETURNS INT RETURN 1;
connection mysqltest_1_con;
USE mysqltest;
CREATE DEFINER=root@localhost PROCEDURE wl2897_p2() SELECT 2;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
CREATE DEFINER=root@localhost FUNCTION wl2897_f2() RETURNS INT RETURN 2;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection mysqltest_2_con;
use mysqltest;
CREATE DEFINER='a @ b @ c'@localhost PROCEDURE wl2897_p3() SELECT 3;
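
The changed error text above means the definer check now asks specifically for the SET USER privilege; SUPER is no longer listed as an alternative. A hedged sketch of how a non-root account could satisfy the new check (user and routine names are illustrative, not taken from the test):

create user 'def_demo'@'localhost';
grant create routine on mysqltest.* to 'def_demo'@'localhost';
grant set user on *.* to 'def_demo'@'localhost';
# connected as def_demo, a foreign DEFINER is now accepted:
create definer=root@localhost procedure mysqltest.wl2897_demo() select 1;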
diff --git a/mysql-test/main/sp.result b/mysql-test/main/sp.result
index 37c22e45b45..1204a905a73 100644
--- a/mysql-test/main/sp.result
+++ b/mysql-test/main/sp.result
@@ -320,8 +320,6 @@ repeat(select 1) into outfile 'b2';
insert into test.t1 values (repeat("b2",3), x);
set x = x-1;
until x = 0 end repeat|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
drop procedure b2|
drop procedure if exists c|
create procedure c(x int)
@@ -4283,9 +4281,6 @@ select i as 'A local variable in a nested compound statement takes precedence o
end;
end;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
call bug5967("a - stored procedure parameter")|
a
a - stored procedure parameter
@@ -5779,8 +5774,6 @@ end;
select 1 from no_such_view limit 1 into x;
return x;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function func_8407_b() returns int
begin
declare x int default 0;
diff --git a/mysql-test/main/sp_trans.result b/mysql-test/main/sp_trans.result
index d558442a6e7..10256bf4cb2 100644
--- a/mysql-test/main/sp_trans.result
+++ b/mysql-test/main/sp_trans.result
@@ -506,8 +506,6 @@ insert into t3 select a from t3;
select count(*)*255 from t3 into table_size;
until table_size > max_table_size*2 end repeat;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
call bug14210_fill_table()|
drop procedure bug14210_fill_table|
create table t4 like t3|
diff --git a/mysql-test/main/sp_trans_log.result b/mysql-test/main/sp_trans_log.result
index adc9eafc370..b72e8332fad 100644
--- a/mysql-test/main/sp_trans_log.result
+++ b/mysql-test/main/sp_trans_log.result
@@ -11,8 +11,6 @@ insert into t1 values (null);
select count(*) from t1 into @a;
return @a;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
reset master;
insert into t2 values (bug23333(),1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
diff --git a/mysql-test/main/sql_safe_updates.result b/mysql-test/main/sql_safe_updates.result
index f2944e60489..099aaa9bca6 100644
--- a/mysql-test/main/sql_safe_updates.result
+++ b/mysql-test/main/sql_safe_updates.result
@@ -9,18 +9,23 @@ select @@sql_safe_updates;
#
create table t1 (a int, b int, primary key (a), key (b));
update t1 set b=2 where a=1 or b=2;
-ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
explain update t1 set b=2 where a=1 or b=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t1 where a=1 or b=2;
-ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
explain delete from t1 where a=1 or b=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
+explain update t1 set b=2 where a=1 or b=2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge PRIMARY,b PRIMARY,b 4,5 NULL 2 Using union(PRIMARY,b); Using where; Using buffer
update t1 set b=2 where a=1 or b=2;
+set @@optimizer_switch="index_merge=off";
+update t1 set b=2 where a=1 or b=2;
+ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
delete from t1 where a=1 or b=2;
+ERROR HY000: You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column
drop table t1;
#
# End of 10.3 tests
diff --git a/mysql-test/main/sql_safe_updates.test b/mysql-test/main/sql_safe_updates.test
index 25fe4a15ca2..becabb6881e 100644
--- a/mysql-test/main/sql_safe_updates.test
+++ b/mysql-test/main/sql_safe_updates.test
@@ -7,14 +7,17 @@ select @@sql_safe_updates;
--echo # MDEV-18304 sql_safe_updates does not work with OR clauses
--echo #
create table t1 (a int, b int, primary key (a), key (b));
---error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
update t1 set b=2 where a=1 or b=2;
explain update t1 set b=2 where a=1 or b=2;
---error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
delete from t1 where a=1 or b=2;
explain delete from t1 where a=1 or b=2;
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
+explain update t1 set b=2 where a=1 or b=2;
update t1 set b=2 where a=1 or b=2;
+set @@optimizer_switch="index_merge=off";
+--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
+update t1 set b=2 where a=1 or b=2;
+--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
delete from t1 where a=1 or b=2;
drop table t1;
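
These hunks pin down the new sql_safe_updates behaviour with OR conditions: the statement is accepted as long as the optimizer can still use a key (here via index_merge), and the safe-mode error only returns once that option is switched off. A condensed sketch of the same sequence, not part of the patch:

set sql_safe_updates=1;
create table t1 (a int, b int, primary key (a), key (b));
insert into t1 values (1,1),(2,2),(3,3),(4,4);
update t1 set b=2 where a=1 or b=2;     # accepted: index_merge(PRIMARY,b) covers the OR
set @@optimizer_switch="index_merge=off";
update t1 set b=2 where a=1 or b=2;     # now fails with ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
set @@optimizer_switch=default;
drop table t1;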
diff --git a/mysql-test/main/ssl.result b/mysql-test/main/ssl.result
index 794830e5529..2694d177056 100644
--- a/mysql-test/main/ssl.result
+++ b/mysql-test/main/ssl.result
@@ -9,6 +9,7 @@ SHOW STATUS LIKE 'Ssl_server_not_after';
Variable_name Value
Ssl_server_not_after Feb 27 03:03:03 2040 GMT
drop table if exists t1,t2,t3,t4;
+set @@default_storage_engine="aria";
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
@@ -609,6 +610,9 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some test with ORDER BY and limit
+#
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -1298,7 +1302,7 @@ companynr tinyint(2) unsigned zerofill NOT NULL default '00',
companyname char(30) NOT NULL default '',
PRIMARY KEY (companynr),
UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
+) ENGINE=aria MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
select STRAIGHT_JOIN t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
companynr companyname
00 Unknown
@@ -1388,6 +1392,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
@@ -1402,15 +1409,15 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 and companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1426,11 +1433,11 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/ssl_compress.result b/mysql-test/main/ssl_compress.result
index 69de425cdc1..beb21ce8b16 100644
--- a/mysql-test/main/ssl_compress.result
+++ b/mysql-test/main/ssl_compress.result
@@ -6,6 +6,7 @@ SHOW STATUS LIKE 'Compression';
Variable_name Value
Compression ON
drop table if exists t1,t2,t3,t4;
+set @@default_storage_engine="aria";
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
@@ -606,6 +607,9 @@ explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
+#
+# Some test with ORDER BY and limit
+#
explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
@@ -1295,7 +1299,7 @@ companynr tinyint(2) unsigned zerofill NOT NULL default '00',
companyname char(30) NOT NULL default '',
PRIMARY KEY (companynr),
UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
+) ENGINE=aria MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
select STRAIGHT_JOIN t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
companynr companyname
00 Unknown
@@ -1385,6 +1389,9 @@ explain select companynr,companyname from t4 left join t2 using (companynr) wher
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
delete from t2 where fld1=999999;
+#
+# Test left join optimization
+#
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
@@ -1399,15 +1406,15 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 and companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1423,11 +1430,11 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
+1 SIMPLE t4 range PRIMARY PRIMARY 1 NULL 12 Using index condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/ssl_timeout.test b/mysql-test/main/ssl_timeout.test
index f5965f874ff..60b45178d81 100644
--- a/mysql-test/main/ssl_timeout.test
+++ b/mysql-test/main/ssl_timeout.test
@@ -1,4 +1,6 @@
--source include/have_ssl_communication.inc
+# Do not run this test with valgrind as may timeout
+--source include/not_valgrind.inc
# Save the initial number of concurrent sessions
--source include/count_sessions.inc
diff --git a/mysql-test/main/stat_tables.result b/mysql-test/main/stat_tables.result
index 379e9737e1c..a6642d66fb7 100644
--- a/mysql-test/main/stat_tables.result
+++ b/mysql-test/main/stat_tables.result
@@ -214,7 +214,7 @@ order by o_year;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
1 SIMPLE part ALL PRIMARY NULL NULL NULL 200 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_partkey 5 dbt3_s001.part.p_partkey 30 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 5 dbt3_s001.part.p_partkey 30 Using index condition
1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
1 SIMPLE n2 eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
1 SIMPLE orders eq_ref PRIMARY,i_o_orderdate,i_o_custkey PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
@@ -257,6 +257,26 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.partsupp.ps_partkey 1 Using where
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.supplier.s_suppkey 8
1 SIMPLE orders eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1
+EXPLAIN EXTENDED select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+extract(year from o_orderdate) as o_year,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from part, supplier, lineitem, partsupp, orders, nation
+where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey and p_partkey = l_partkey
+and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE supplier ALL PRIMARY,i_s_nationkey NULL NULL NULL 10 100.00 Using where; Using temporary; Using filesort
+1 SIMPLE nation eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00
+1 SIMPLE partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_suppkey 4 dbt3_s001.supplier.s_suppkey 70 100.00
+1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.partsupp.ps_partkey 1 100.00 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.supplier.s_suppkey 8 100.00
+1 SIMPLE orders eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 100.00
+Warnings:
+Note 1003 select `dbt3_s001`.`nation`.`n_name` AS `nation`,extract(year from `dbt3_s001`.`orders`.`o_orderDATE`) AS `o_year`,sum(`dbt3_s001`.`lineitem`.`l_extendedprice` * (1 - `dbt3_s001`.`lineitem`.`l_discount`) - `dbt3_s001`.`partsupp`.`ps_supplycost` * `dbt3_s001`.`lineitem`.`l_quantity`) AS `sum_profit` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`lineitem` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`orders` join `dbt3_s001`.`nation` where `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`orders`.`o_orderkey` = `dbt3_s001`.`lineitem`.`l_orderkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`part`.`p_name` like '%green%' group by `dbt3_s001`.`nation`.`n_name`,extract(year from `dbt3_s001`.`orders`.`o_orderDATE`) desc order by `dbt3_s001`.`nation`.`n_name`,extract(year from `dbt3_s001`.`orders`.`o_orderDATE`) desc
select nation, o_year, sum(amount) as sum_profit
from (select n_name as nation,
extract(year from o_orderdate) as o_year,
@@ -337,7 +357,7 @@ and o_orderkey=l_orderkey and p_partkey=l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE part range PRIMARY,i_p_retailprice i_p_retailprice 9 NULL 1 Using index condition
1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const 1
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
select o_orderkey, p_partkey
from part, lineitem, orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
@@ -412,7 +432,7 @@ EXPLAIN
SELECT * FROM t1 STRAIGHT_JOIN t2 WHERE name IN ( 'AUS','YEM' ) AND id = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 0 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
test.t2 analyze status Engine-independent statistics collected
@@ -591,8 +611,8 @@ set @@use_stat_tables= PREFERABLY;
explain
SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL
-1 SIMPLE global_priv ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
+1 SIMPLE global_priv ALL NULL NULL NULL NULL 5
+1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL Using join buffer (flat, BNL join)
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
#
diff --git a/mysql-test/main/stat_tables.test b/mysql-test/main/stat_tables.test
index 7488ccb6877..895db3ce72e 100644
--- a/mysql-test/main/stat_tables.test
+++ b/mysql-test/main/stat_tables.test
@@ -4,6 +4,7 @@
--source include/have_stat_tables.inc
--source include/have_partition.inc
+--source include/have_sequence.inc
select @@global.use_stat_tables;
select @@session.use_stat_tables;
@@ -125,7 +126,6 @@ order by o_year;
eval EXPLAIN $Q8;
eval $Q8;
-
let $Q9=
select nation, o_year, sum(amount) as sum_profit
from (select n_name as nation,
@@ -140,6 +140,7 @@ group by nation, o_year
order by nation, o_year desc;
eval EXPLAIN $Q9;
+eval EXPLAIN EXTENDED $Q9;
eval $Q9;
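
The added EXPLAIN EXTENDED run exposes the new filtered estimates for Q9. As a generic illustration of the pattern in a plain client session (table and column names are placeholders, not from the test):

explain extended select * from some_table where some_col > 10;
show warnings;   # the Note 1003 warning shows the query as the optimizer rewrote it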
diff --git a/mysql-test/main/stat_tables_innodb.result b/mysql-test/main/stat_tables_innodb.result
index 5b62f228b1f..c90e99a9bbf 100644
--- a/mysql-test/main/stat_tables_innodb.result
+++ b/mysql-test/main/stat_tables_innodb.result
@@ -77,12 +77,12 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
-1 SIMPLE nation ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5
-1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 Using index
-1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.nation.n_nationkey 6 Using index
+1 SIMPLE supplier index PRIMARY,i_s_nationkey i_s_nationkey 5 NULL 10 Using where; Using index; Using temporary; Using filesort
+1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 Using where
+1 SIMPLE region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 Using where
+1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.supplier.s_nationkey 6 Using index
1 SIMPLE orders ref|filter PRIMARY,i_o_orderdate,i_o_custkey i_o_custkey|i_o_orderdate 5|4 dbt3_s001.customer.c_custkey 15 (14%) Using where; Using rowid filter
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey 9 dbt3_s001.supplier.s_suppkey,dbt3_s001.orders.o_orderkey 1
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
from customer, orders, lineitem, supplier, nation, region
where c_custkey = o_custkey and l_orderkey = o_orderkey
@@ -208,12 +208,12 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
-1 SIMPLE nation ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5
-1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 Using index
-1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.nation.n_nationkey 6 Using index
+1 SIMPLE supplier index PRIMARY,i_s_nationkey i_s_nationkey 5 NULL 10 Using where; Using index; Using temporary; Using filesort
+1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 Using where
+1 SIMPLE region eq_ref PRIMARY PRIMARY 4 dbt3_s001.nation.n_regionkey 1 Using where
+1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.supplier.s_nationkey 6 Using index
1 SIMPLE orders ref|filter PRIMARY,i_o_orderdate,i_o_custkey i_o_custkey|i_o_orderdate 5|4 dbt3_s001.customer.c_custkey 15 (14%) Using where; Using rowid filter
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey 9 dbt3_s001.supplier.s_suppkey,dbt3_s001.orders.o_orderkey 1
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
from customer, orders, lineitem, supplier, nation, region
where c_custkey = o_custkey and l_orderkey = o_orderkey
@@ -246,12 +246,12 @@ order by o_year;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
1 SIMPLE part ALL PRIMARY NULL NULL NULL 200 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_partkey 5 dbt3_s001.part.p_partkey 30 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 5 dbt3_s001.part.p_partkey 30 Using index condition
1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
1 SIMPLE n2 eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
1 SIMPLE orders eq_ref PRIMARY,i_o_orderdate,i_o_custkey PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1 Using where
+1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey i_n_regionkey 9 dbt3_s001.region.r_regionkey,dbt3_s001.customer.c_nationkey 1 Using index
select o_year,
sum(case when nation = 'UNITED STATES' then volume else 0 end) /
sum(volume) as mkt_share
@@ -289,6 +289,26 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.partsupp.ps_partkey 1 Using where
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.supplier.s_suppkey 8
1 SIMPLE orders eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1
+EXPLAIN EXTENDED select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+extract(year from o_orderdate) as o_year,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from part, supplier, lineitem, partsupp, orders, nation
+where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey and p_partkey = l_partkey
+and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE supplier index PRIMARY,i_s_nationkey i_s_nationkey 5 NULL 10 100.00 Using where; Using index; Using temporary; Using filesort
+1 SIMPLE nation eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1 100.00
+1 SIMPLE partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_suppkey 4 dbt3_s001.supplier.s_suppkey 70 100.00
+1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.partsupp.ps_partkey 1 100.00 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.supplier.s_suppkey 8 100.00
+1 SIMPLE orders eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 100.00
+Warnings:
+Note 1003 select `dbt3_s001`.`nation`.`n_name` AS `nation`,extract(year from `dbt3_s001`.`orders`.`o_orderDATE`) AS `o_year`,sum(`dbt3_s001`.`lineitem`.`l_extendedprice` * (1 - `dbt3_s001`.`lineitem`.`l_discount`) - `dbt3_s001`.`partsupp`.`ps_supplycost` * `dbt3_s001`.`lineitem`.`l_quantity`) AS `sum_profit` from `dbt3_s001`.`part` join `dbt3_s001`.`supplier` join `dbt3_s001`.`lineitem` join `dbt3_s001`.`partsupp` join `dbt3_s001`.`orders` join `dbt3_s001`.`nation` where `dbt3_s001`.`partsupp`.`ps_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`lineitem`.`l_suppkey` = `dbt3_s001`.`supplier`.`s_suppkey` and `dbt3_s001`.`part`.`p_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`lineitem`.`l_partkey` = `dbt3_s001`.`partsupp`.`ps_partkey` and `dbt3_s001`.`orders`.`o_orderkey` = `dbt3_s001`.`lineitem`.`l_orderkey` and `dbt3_s001`.`nation`.`n_nationkey` = `dbt3_s001`.`supplier`.`s_nationkey` and `dbt3_s001`.`part`.`p_name` like '%green%' group by `dbt3_s001`.`nation`.`n_name`,extract(year from `dbt3_s001`.`orders`.`o_orderDATE`) desc order by `dbt3_s001`.`nation`.`n_name`,extract(year from `dbt3_s001`.`orders`.`o_orderDATE`) desc
select nation, o_year, sum(amount) as sum_profit
from (select n_name as nation,
extract(year from o_orderdate) as o_year,
@@ -444,7 +464,7 @@ EXPLAIN
SELECT * FROM t1 STRAIGHT_JOIN t2 WHERE name IN ( 'AUS','YEM' ) AND id = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 Using index
-1 SIMPLE t2 ALL NULL NULL NULL NULL 0 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
test.t2 analyze status Engine-independent statistics collected
@@ -623,8 +643,8 @@ set @@use_stat_tables= PREFERABLY;
explain
SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL
-1 SIMPLE global_priv ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
+1 SIMPLE global_priv ALL NULL NULL NULL NULL 5
+1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL Using join buffer (flat, BNL join)
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
#
diff --git a/mysql-test/main/statistics_json.result b/mysql-test/main/statistics_json.result
index 7587cff0d48..7b7cc86642c 100644
--- a/mysql-test/main/statistics_json.result
+++ b/mysql-test/main/statistics_json.result
@@ -8338,12 +8338,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 18,
+ "cost": "COST_REPLACED",
"filtered": 5.555555344,
"attached_condition": "t1.a > 'y'"
}
diff --git a/mysql-test/main/statistics_json.test b/mysql-test/main/statistics_json.test
index 5263a98fff7..b9a5c3dbea2 100644
--- a/mysql-test/main/statistics_json.test
+++ b/mysql-test/main/statistics_json.test
@@ -477,6 +477,7 @@ INSERT INTO t1 VALUES ('o'),('s'),('j'),('s'),('y'),('s'),('l'),
set histogram_type=json_hb;
analyze table t1 persistent for all;
--echo # filtered must not be negative:
+--source include/explain-no-costs.inc
explain format=json select * from t1 where a > 'y';
drop table t1;
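
The result hunk above shows that FORMAT=JSON plans now carry per-table "cost" and "loops" members, so the test sources include/explain-no-costs.inc to compare the volatile cost numbers as "COST_REPLACED". The exact body of that include is not part of this diff; conceptually it normalizes the next statement's output, roughly along the lines of this assumed mysqltest fragment:

# assumed sketch only -- the real include may differ:
--replace_regex /("cost": )[0-9.e+-]+/\1"COST_REPLACED"/
explain format=json select * from t1 where a > 'y';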
diff --git a/mysql-test/main/statistics_upgrade.test b/mysql-test/main/statistics_upgrade.test
index 9f47ab1a83b..72de08be166 100644
--- a/mysql-test/main/statistics_upgrade.test
+++ b/mysql-test/main/statistics_upgrade.test
@@ -44,8 +44,8 @@ alter table t3 rename mysql.column_stats;
--exec $MYSQL_UPGRADE --upgrade-system-tables --force --silent 2>&1
let $MYSQLD_DATADIR= `select @@datadir`;
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo #
--echo # Table definition after upgrade:
diff --git a/mysql-test/main/statistics_upgrade_not_done.test b/mysql-test/main/statistics_upgrade_not_done.test
index a74a5f35d83..d38a387bd27 100644
--- a/mysql-test/main/statistics_upgrade_not_done.test
+++ b/mysql-test/main/statistics_upgrade_not_done.test
@@ -50,8 +50,8 @@ select * from mysql.column_stats where table_name='t2' and db_name='test';
--exec $MYSQL_UPGRADE --upgrade-system-tables --force --silent 2>&1
let $MYSQLD_DATADIR= `select @@datadir`;
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
analyze select * from t0 where a<3;
drop table t0, t1, t2;
diff --git a/mysql-test/main/status.result b/mysql-test/main/status.result
index d17bd9c6a61..0669bdf3b34 100644
--- a/mysql-test/main/status.result
+++ b/mysql-test/main/status.result
@@ -71,10 +71,10 @@ a
6
show status like 'last_query_cost';
Variable_name Value
-Last_query_cost 12.084449
+Last_query_cost 0.017856
show status like 'last_query_cost';
Variable_name Value
-Last_query_cost 12.084449
+Last_query_cost 0.017856
select 1;
1
1
@@ -134,20 +134,20 @@ a
1
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 2.402418
+Last_query_cost 0.010348
EXPLAIN SELECT a FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 2.402418
+Last_query_cost 0.010348
SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY a;
a
1
2
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 0.000000
+Last_query_cost 0.010348
EXPLAIN SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
@@ -155,25 +155,25 @@ id select_type table type possible_keys key key_len ref rows Extra
NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL Using filesort
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 0.000000
+Last_query_cost 0.010348
SELECT a IN (SELECT a FROM t1) FROM t1 LIMIT 1;
a IN (SELECT a FROM t1)
1
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 0.000000
+Last_query_cost 0.010348
SELECT (SELECT a FROM t1 LIMIT 1) x FROM t1 LIMIT 1;
x
1
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 0.000000
+Last_query_cost 0.010348
SELECT * FROM t1 a, t1 b LIMIT 1;
a a
1 1
SHOW SESSION STATUS LIKE 'Last_query_cost';
Variable_name Value
-Last_query_cost 5.205836
+Last_query_cost 0.021190
DROP TABLE t1;
connect con1,localhost,root,,;
show status like 'com_show_status';
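
The Last_query_cost values above change because the optimizer now reports costs on a different, much smaller scale, and UNIONs and subqueries now leave a non-zero value behind. A minimal sketch for inspecting it (not part of the patch; absolute numbers are only comparable within a single server version):

create table t1 (a int);
insert into t1 values (1),(2),(3);
select a from t1;
show session status like 'Last_query_cost';
drop table t1;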
diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result
index 4209e2bc529..2b3050c3ca2 100644
--- a/mysql-test/main/subselect.result
+++ b/mysql-test/main/subselect.result
@@ -345,7 +345,7 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
-1 PRIMARY t6 ALL i1 NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t6 ref i1 i1 5 test.t7.uq 1 100.00
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t7` join `test`.`t6` where `test`.`t6`.`clinic_uq` = `test`.`t7`.`uq`
@@ -895,6 +895,9 @@ select (select a+1) from t1;
NULL
4.5
drop table t1;
+#
+# Null with keys
+#
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
INSERT INTO t1 VALUES (1),(2),(3),(4);
@@ -1426,6 +1429,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
a
1
+#
+# IN subselect optimization test
+#
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -1449,21 +1455,21 @@ a
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
a
2
3
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
-1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t2`.`a` = `test`.`t1`.`a`
drop table t1, t2, t3;
create table t1 (a int, b int, index a (a,b));
create table t2 (a int, index a (a));
@@ -1472,42 +1478,48 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
-a
-2
-3
-4
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a`
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
a
2
+3
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
-3
+4
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
1 PRIMARY t3 range a a 5 NULL 3 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 0.29 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1` join `test`.`t3`) where `test`.`t1`.`b` = `test`.`t3`.`a` and `test`.`t1`.`a` = `test`.`t2`.`a`
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+a
+2
+3
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
+Warnings:
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1517,12 +1529,6 @@ select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31
a
2
4
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
-Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
drop table t0, t1, t2, t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
@@ -1584,6 +1590,9 @@ Note 1003 (select 'tttt' AS `s1` from dual)
s1
tttt
drop table t1;
+#
+# IN optimisation test results
+#
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
@@ -2417,20 +2426,23 @@ a
1
3
DROP TABLE t1;
+#
+# SELECT(EXISTS * ...)optimisation
+#
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-a b
-1 2
-3 4
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+sum(a+b)
+1296
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY up ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY up ALL NULL NULL NULL NULL 25 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 2 100.00
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 25 100.00
Warnings:
Note 1276 Field or reference 'test.up.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select `test`.`up`.`a` AS `a`,`test`.`up`.`b` AS `b` from `test`.`t1` `up` semi join (`test`.`t1`) where 1
+Note 1003 select sum(`test`.`up`.`a` + `test`.`up`.`b`) AS `sum(a+b)` from `test`.`t1` `up` semi join (`test`.`t1`) where 1
drop table t1;
CREATE TABLE t1 (t1_a int);
INSERT INTO t1 VALUES (1);
@@ -3098,9 +3110,13 @@ retailerID statusID changed
0048 1 2006-01-06 12:37:50
0059 1 2006-01-06 12:37:50
drop table t1;
+#
+# Bug#21180 Subselect with index for both WHERE and ORDER BY
+# produces empty result
+#
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3113,7 +3129,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using where
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3125,7 +3141,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -4225,8 +4241,8 @@ INSERT INTO t2 VALUES (7), (5), (1), (3);
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
id st
-3 FL
1 GA
+3 FL
7 FL
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id)
@@ -4327,6 +4343,9 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1;
0
0
DROP TABLE t1, t2;
+#
+# Bug#28076 inconsistent binary/varbinary comparison
+#
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
SELECT s1, s2 FROM t1 WHERE s2 IN (SELECT s1 FROM t1);
@@ -4388,8 +4407,8 @@ CREATE INDEX I1 ON t1 (a);
CREATE INDEX I2 ON t1 (b);
EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
a b
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10));
@@ -4398,15 +4417,15 @@ CREATE INDEX I1 ON t2 (a);
CREATE INDEX I2 ON t2 (b);
EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t2 ref I1 I1 4 test.t2.b 2 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t2 index I1 I1 4 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t2 ref I2 I2 13 test.t2.a 1 Using index condition
SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
a b
EXPLAIN
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
a b
DROP TABLE t1,t2;
@@ -4436,10 +4455,13 @@ out_a MIN(b)
1 2
2 4
DROP TABLE t1;
+#
+# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
2
2
@@ -4447,19 +4469,18 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 33.33 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select 2 AS `2` from `test`.`t1` semi join (`test`.`t2`) where 1
+Note 1003 select 2 AS `2` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = `test`.`t1`.`a`
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION
(SELECT 1 FROM t2 WHERE t1.a = t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 3 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
@@ -5698,7 +5719,8 @@ DROP TABLE IF EXISTS ot1, ot4, it2, it3;
CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
a
@@ -5709,7 +5731,7 @@ WHERE EXISTS (SELECT a FROM t2 USE INDEX() WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 502
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
a
@@ -5718,9 +5740,8 @@ EXPLAIN
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 index idx idx 5 NULL 3 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 11 Using index; FirstMatch(t1)
DROP TABLE t1,t2;
#
# BUG#752992: Wrong results for a subquery with 'semijoin=on'
@@ -5737,9 +5758,9 @@ SET @save_join_cache_level=@@join_cache_level;
SET join_cache_level=0;
EXPLAIN SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 3
-1 PRIMARY it eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 Using index
-1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(it)
+1 PRIMARY it index PRIMARY PRIMARY 4 NULL 3 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.it.pk 1
+1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
pk i
11 0
@@ -6080,8 +6101,7 @@ WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED it1 ref idx_cvk_cik idx_cvk_cik 9 const,const 1 Using where; Using index
+1 PRIMARY it1 ref idx_cvk_cik idx_cvk_cik 9 const,const 1 Using where; Using index; FirstMatch(ot)
SELECT col_int_nokey FROM ot
WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
@@ -6093,8 +6113,7 @@ WHERE (col_varchar_nokey, 'x') IN
(SELECT col_varchar_key, col_varchar_key2 FROM it2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-2 MATERIALIZED it2 ref idx_cvk_cvk2_cik,idx_cvk_cik idx_cvk_cvk2_cik 8 const,const 1 Using where; Using index
+1 PRIMARY it2 ref idx_cvk_cvk2_cik,idx_cvk_cik idx_cvk_cvk2_cik 8 const,const 1 Using where; Using index; FirstMatch(ot)
SELECT col_int_nokey FROM ot
WHERE (col_varchar_nokey, 'x') IN
(SELECT col_varchar_key, col_varchar_key2 FROM it2);
@@ -6638,7 +6657,7 @@ SET @@optimizer_switch='semijoin=off,materialization=off,in_to_exists=on,subquer
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 4 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 1 Using index
SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
a
2009-01-01
@@ -6841,7 +6860,7 @@ FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
WHERE alias1.a = alias2.a OR ('Moscow') IN ( SELECT a FROM t1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index a a 19 NULL 11 Using where; Using index
-1 PRIMARY alias2 ref a a 19 test.alias1.a 2 Using index
+1 PRIMARY alias2 ref a a 19 test.alias1.a 1 Using index
1 PRIMARY alias3 index NULL a 19 NULL 11 Using index; Using join buffer (flat, BNL join)
2 SUBQUERY t1 index_subquery a a 19 const 1 Using index; Using where
SELECT MAX( alias2.a )
@@ -6992,7 +7011,7 @@ WHERE SLEEP(0.1) OR c < 'p' OR b = ( SELECT MIN(b) FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL b NULL NULL NULL 2 Using where
-1 PRIMARY t3 ref d d 5 test.t2.b 2 Using index
+1 PRIMARY t3 ref d d 5 test.t2.b 1 Using index
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
set @tmp_mdev410=@@global.userstat;
set global userstat=on;
@@ -7024,7 +7043,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields
@@ -7058,7 +7077,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-5991: crash in Item_field::used_tables
diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test
index be22169a1d6..48ad7402755 100644
--- a/mysql-test/main/subselect.test
+++ b/mysql-test/main/subselect.test
@@ -7,6 +7,7 @@
# as possible.
#
# Initialise
+--source include/have_sequence.inc
--disable_warnings
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t11,t12;
drop view if exists v2;
@@ -537,9 +538,9 @@ select (select a+1) from t1;
drop table t1;
--enable_view_protocol
-#
-# Null with keys
-#
+--echo #
+--echo # Null with keys
+--echo #
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
@@ -906,9 +907,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
-#
-# IN subselect optimization test
-#
+--echo #
+--echo # IN subselect optimization test
+--echo #
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -930,20 +931,20 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
explain extended select * from t2 where t2.a in (select a from t1);
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31);
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
drop table t0, t1, t2, t3;
#
@@ -1017,13 +1018,14 @@ explain extended (select * from t1);
(select * from t1);
drop table t1;
-#
-# IN optimisation test results
-#
+--echo #
+--echo # IN optimisation test results
+--echo #
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
insert into t2 values ('a1'),('a2');
+
select s1, s1 NOT IN (SELECT s1 FROM t2) from t1;
select s1, s1 = ANY (SELECT s1 FROM t2) from t1;
select s1, s1 <> ALL (SELECT s1 FROM t2) from t1;
@@ -1450,15 +1452,16 @@ SELECT a FROM t1 WHERE a <> ALL (SELECT a FROM t1 WHERE b = '2');
DROP TABLE t1;
-#
-# SELECT(EXISTS * ...)optimisation
-#
+--echo #
+--echo # SELECT(EXISTS * ...)optimisation
+--echo #
#--view-protocol is disabled because view gives another query plan
--disable_view_protocol
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
drop table t1;
--enable_view_protocol
@@ -2059,14 +2062,14 @@ select * from t1 r1
drop table t1;
-#
-# Bug#21180 Subselect with index for both WHERE and ORDER BY
-# produces empty result
-#
+--echo #
+--echo # Bug#21180 Subselect with index for both WHERE and ORDER BY
+--echo # produces empty result
+--echo #
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3098,6 +3101,7 @@ INSERT INTO t1 VALUES
CREATE TABLE t2 (id int NOT NULL, INDEX idx(id));
INSERT INTO t2 VALUES (7), (5), (1), (3);
+--sorted_result
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
SELECT id, st FROM t1
@@ -3272,9 +3276,9 @@ DROP TABLE t1, t2;
#--enable_query_log
#drop table t1;
-#
-# Bug#28076 inconsistent binary/varbinary comparison
-#
+--echo #
+--echo # Bug#28076 inconsistent binary/varbinary comparison
+--echo #
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
@@ -3376,15 +3380,15 @@ GROUP BY a;
DROP TABLE t1;
-#
-# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
-#
+--echo #
+--echo # Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+--echo #
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
@@ -4853,7 +4857,8 @@ CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
diff --git a/mysql-test/main/subselect2.result b/mysql-test/main/subselect2.result
index db6c85900ad..0d2f7372887 100644
--- a/mysql-test/main/subselect2.result
+++ b/mysql-test/main/subselect2.result
@@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3_b eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_a.PARENTID 1 Using where
1 PRIMARY t3_c eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_b.PARENTID 1 Using where
1 PRIMARY t3_d eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_c.PARENTID 1 Using where
-1 PRIMARY t3_e ref|filter PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX FFOLDERID_IDX|CMFLDRPARNT_IDX 34|35 test.t3_d.PARENTID 1 (29%) Using where; Using rowid filter
+1 PRIMARY t3_e eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_d.PARENTID 1 Using where
drop table t1, t2, t3, t4;
CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB;
INSERT INTO t1 VALUES (1),(2);
@@ -162,8 +162,8 @@ EXPLAIN
SELECT * FROM t2,t3 WHERE (2,9) IN (SELECT DISTINCT a,pk FROM t1) OR a = b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 index a a 5 NULL 2 Using where; Using index
-1 PRIMARY t3 ref b b 5 test.t2.a 2 Using index
-2 SUBQUERY t1 const PRIMARY,a PRIMARY 4 const 1 Using where
+1 PRIMARY t3 ref b b 5 test.t2.a 1 Using index
+2 SUBQUERY t1 const PRIMARY,a a 9 const,const 1 Using where; Using index
SELECT * FROM t2,t3 WHERE (2,9) IN (SELECT DISTINCT a,pk FROM t1) OR a = b;
pk a b
0 4 4
@@ -171,8 +171,8 @@ EXPLAIN
SELECT * FROM t2,t3 WHERE (2,9) IN (SELECT DISTINCT a,pk FROM v1) OR a = b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 index a a 5 NULL 2 Using where; Using index
-1 PRIMARY t3 ref b b 5 test.t2.a 2 Using index
-2 SUBQUERY t1 const PRIMARY,a PRIMARY 4 const 1 Using where
+1 PRIMARY t3 ref b b 5 test.t2.a 1 Using index
+2 SUBQUERY t1 const PRIMARY,a a 9 const,const 1 Using where; Using index
SELECT * FROM t2,t3 WHERE (2,9) IN (SELECT DISTINCT a,pk FROM v1) OR a = b;
pk a b
0 4 4
@@ -277,7 +277,7 @@ KEY `date` (`date`)
INSERT INTO `t1` VALUES (2085,'2012-01-01 00:00:00','2013-01-01 00:00:00');
INSERT INTO `t1` VALUES (2084,'2012-02-01 00:00:00','2013-01-01 00:00:00');
INSERT INTO `t1` VALUES (2088,'2012-03-01 00:00:00','2013-01-01 00:00:00');
-explain
+set statement optimizer_scan_setup_cost=0 for explain
SELECT * FROM (
SELECT node_uid, date, mirror_date, @result := 0 AS result
FROM t1
@@ -286,9 +286,9 @@ WHERE date < '2012-12-12 12:12:12'
ORDER BY mirror_date ASC
) AS calculated_result;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
-2 DERIVED t1 range date date 6 NULL 3 Using index condition; Using where; Rowid-ordered scan; Using filesort
-SELECT * FROM (
+1 PRIMARY <derived3> ALL NULL NULL NULL NULL 3
+3 DERIVED t1 ALL date NULL NULL NULL 3 Using where; Using filesort
+set statement optimizer_scan_setup_cost=0 FOR SELECT * FROM (
SELECT node_uid, date, mirror_date, @result := 0 AS result
FROM t1
WHERE date < '2012-12-12 12:12:12'
@@ -339,7 +339,7 @@ where t1.a = t2.a and ( t1.a = ( select min(a) from t1 ) or 0 );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t3 ref idx idx 6 func 2 100.00 Using where; Using index
+1 PRIMARY t3 ref idx idx 6 func 1 100.00 Using where; Using index
2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `a`,`test`.`t3`.`a` AS `a` from `test`.`t1` join `test`.`t2` left join `test`.`t3` on(`test`.`t3`.`a` = `test`.`t1`.`a`) where `test`.`t1`.`a` = (/* select#2 */ select min(`test`.`t1`.`a`) from `test`.`t1`) and `test`.`t2`.`a` = (/* select#2 */ select min(`test`.`t1`.`a`) from `test`.`t1`)
diff --git a/mysql-test/main/subselect2.test b/mysql-test/main/subselect2.test
index b341e516941..0780a5b9319 100644
--- a/mysql-test/main/subselect2.test
+++ b/mysql-test/main/subselect2.test
@@ -303,7 +303,7 @@ INSERT INTO `t1` VALUES (2085,'2012-01-01 00:00:00','2013-01-01 00:00:00');
INSERT INTO `t1` VALUES (2084,'2012-02-01 00:00:00','2013-01-01 00:00:00');
INSERT INTO `t1` VALUES (2088,'2012-03-01 00:00:00','2013-01-01 00:00:00');
-explain
+set statement optimizer_scan_setup_cost=0 for explain
SELECT * FROM (
SELECT node_uid, date, mirror_date, @result := 0 AS result
FROM t1
@@ -312,7 +312,7 @@ SELECT * FROM (
ORDER BY mirror_date ASC
) AS calculated_result;
-SELECT * FROM (
+set statement optimizer_scan_setup_cost=0 FOR SELECT * FROM (
SELECT node_uid, date, mirror_date, @result := 0 AS result
FROM t1
WHERE date < '2012-12-12 12:12:12'
diff --git a/mysql-test/main/subselect3.inc b/mysql-test/main/subselect3.inc
index af7b45542bd..c9d20800026 100644
--- a/mysql-test/main/subselect3.inc
+++ b/mysql-test/main/subselect3.inc
@@ -242,8 +242,9 @@ from t2;
drop table t1,t2,t3,t4;
-# More tests for tricky multi-column cases, where some of pushed-down
-# equalities are used for index lookups and some arent.
+--echo # More tests for tricky multi-column cases, where some of pushed-down
+--echo # equalities are used for index lookups and some are not.
+
create table t1 (oref char(4), grp int, ie1 int, ie2 int);
insert into t1 (oref, grp, ie1, ie2) values
('aa', 10, 2, 1),
@@ -470,8 +471,11 @@ explain select oref, a, a in (select ie from t1 where oref=t2.oref) Z from t2;
select oref, a, a in (select ie from t1 where oref=t2.oref) Z from t2;
+explain select oref, a from t2 where a in (select ie from t1 where oref=t2.oref);
+
select oref, a from t2 where a in (select ie from t1 where oref=t2.oref);
+explain select oref, a from t2 where a not in (select ie from t1 where oref=t2.oref);
select oref, a from t2 where a not in (select ie from t1 where oref=t2.oref);
explain
@@ -1012,7 +1016,6 @@ explain select * from t1 where a in (select a from t1);
drop table t1;
set @@optimizer_switch=@save_optimizer_switch;
-set @@optimizer_switch=@save_optimizer_switch;
set @@optimizer_switch='materialization=off';
#
@@ -1034,9 +1037,9 @@ explain select * from t2 where a in (select straight_join A.a from t1 A, t1 B);
explain select straight_join * from t2 X, t2 Y
where X.a in (select straight_join A.a from t1 A, t1 B);
-#
-# SJ-Materialization scan + first table being system const table
-#
+--echo #
+--echo # SJ-Materialization scan + first table being system const table
+--echo #
create table t0 (a int, b int);
insert into t0 values(1,1);
explain select * from t0, t3 where t3.a in (select a from t2) and (t3.a < 10 or t3.a >30);
@@ -1044,9 +1047,12 @@ create table t4 as select a as x, a as y from t1;
explain select * from t0, t3 where (t3.a, t3.b) in (select x,y from t4) and (t3.a < 10 or t3.a >30);
drop table t0,t1,t2,t3,t4;
-#
-# LooseScan with ref access
-#
+--echo #
+--echo # LooseScan with ref access
+--echo #
+
+set @@optimizer_switch='join_cache_hashed=off';
+
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, filler char(100), key(a,b));
@@ -1082,9 +1088,9 @@ drop table t0, t1, t2;
set @@optimizer_switch='materialization=off';
-#
-# Primitive SJ-Materialization tests for DECIMAL and DATE
-#
+--echo #
+--echo # Primitive SJ-Materialization tests for DECIMAL and DATE
+--echo #
create table t0 (a decimal(4,2));
insert into t0 values (10.24), (22.11);
create table t1 as select * from t0;
@@ -1110,6 +1116,7 @@ insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 as select a as a, a as b, a as c from t0 where a < 3;
create table t2 as select a as a, a as b from t0 where a < 3;
insert into t2 select * from t2;
+select count(*) from t2;
explain select * from t1 where (a,b,c) in (select X.a, Y.a, Z.a from t2 X, t2 Y, t2 Z where X.b=33);
@@ -1155,7 +1162,7 @@ drop table t0, t1;
create table t1 (
idIndividual int primary key
);
-insert into t1 values (1),(2);
+insert into t1 values (1),(2),(1000);
create table t2 (
idContact int primary key,
@@ -1170,7 +1177,7 @@ create table t3 (
postalStripped varchar(100)
);
-insert into t3 values (1,1, 'foo'), (2,2,'bar');
+insert into t3 values (1,1, 'foo'), (2,2,'T2H3B2');
--echo The following must be converted to a semi-join:
set @save_optimizer_switch=@@optimizer_switch;
@@ -1181,6 +1188,12 @@ WHERE a.idIndividual IN
INNER JOIN t2 c ON c.idContact=cona.idContact
WHERE cona.postalStripped='T2H3B2'
);
+SELECT a.idIndividual FROM t1 a
+WHERE a.idIndividual IN
+ ( SELECT c.idObj FROM t3 cona
+ INNER JOIN t2 c ON c.idContact=cona.idContact
+ WHERE cona.postalStripped='T2H3B2'
+ );
set @@optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
diff --git a/mysql-test/main/subselect3.result b/mysql-test/main/subselect3.result
index 28187e0ffdd..c9a1ea6ca61 100644
--- a/mysql-test/main/subselect3.result
+++ b/mysql-test/main/subselect3.result
@@ -96,10 +96,10 @@ explain extended
select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 100.00
-2 DEPENDENT SUBQUERY t1 ALL a NULL NULL NULL 8 100.00 Using where
+2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 3 100.00 Using where; Full scan on NULL key
Warnings:
Note 1276 Field or reference 'test.t2.oref' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t2`.`oref` AS `oref`,`test`.`t2`.`a` AS `a`,<expr_cache><`test`.`t2`.`a`,`test`.`t2`.`oref`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where `test`.`t1`.`oref` = `test`.`t2`.`oref` and trigcond(<cache>(`test`.`t2`.`a`) = `test`.`t1`.`a` or `test`.`t1`.`a` is null) having trigcond(`test`.`t1`.`a` is null)))) AS `Z` from `test`.`t2`
+Note 1003 /* select#1 */ select `test`.`t2`.`oref` AS `oref`,`test`.`t2`.`a` AS `a`,<expr_cache><`test`.`t2`.`a`,`test`.`t2`.`oref`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a checking NULL where `test`.`t1`.`oref` = `test`.`t2`.`oref` having trigcond(`test`.`t1`.`a` is null))))) AS `Z` from `test`.`t2`
flush status;
select oref, a from t2 where a in (select a from t1 where oref=t2.oref);
oref a
@@ -193,7 +193,7 @@ t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z
from t3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 3 100.00
-2 DEPENDENT SUBQUERY t1 ref a a 4 func 2 100.00 Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY t1 ref a a 4 func 1 100.00 Using where; Full scan on NULL key
2 DEPENDENT SUBQUERY t2 ref a a 4 test.t1.b 1 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t3.oref' of SELECT #2 was resolved in SELECT #1
@@ -278,6 +278,8 @@ a b oref Z
NULL 1 100 0
NULL 2 100 NULL
drop table t1,t2,t3,t4;
+# More tests for tricky multi-column cases, where some of pushed-down
+# equalities are used for index lookups and some are not.
create table t1 (oref char(4), grp int, ie1 int, ie2 int);
insert into t1 (oref, grp, ie1, ie2) values
('aa', 10, 2, 1),
@@ -617,10 +619,18 @@ cc 2 0
cc NULL NULL
aa 1 1
bb NULL NULL
+explain select oref, a from t2 where a in (select ie from t1 where oref=t2.oref);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 Using where
select oref, a from t2 where a in (select ie from t1 where oref=t2.oref);
oref a
cc 5
aa 1
+explain select oref, a from t2 where a not in (select ie from t1 where oref=t2.oref);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 7 Using where
+2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 Using where; Full scan on NULL key
select oref, a from t2 where a not in (select ie from t1 where oref=t2.oref);
oref a
ee NULL
@@ -1157,9 +1167,9 @@ set @@optimizer_switch='firstmatch=off,materialization=off';
set @@max_heap_table_size= 16384;
explain select count(*) from t0 A, t0 B, t0 C, t0 D where D.a in (select a from t1 E where a+1 < 10000 + A.a + B.a +C.a+D.a);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY A ALL NULL NULL NULL NULL 10
+1 PRIMARY E ALL NULL NULL NULL NULL 5 Start temporary
+1 PRIMARY A ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
1 PRIMARY B ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
-1 PRIMARY E ALL NULL NULL NULL NULL 5 Start temporary; Using join buffer (flat, BNL join)
1 PRIMARY C ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
1 PRIMARY D ALL NULL NULL NULL NULL 10 Using where; End temporary; Using join buffer (flat, BNL join)
flush status;
@@ -1180,9 +1190,8 @@ create table t3 ( a int , filler char(100), key(a));
insert into t3 select A.a + 10*B.a, 'filler' from t0 A, t0 B;
explain select * from t3 where a in (select a from t2) and (a > 5 or a < 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 ref a a 5 test.t2.a 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Start temporary
+1 PRIMARY t3 ref a a 5 test.t2.a 1 End temporary
select * from t3 where a in (select a from t2);
a filler
1 filler
@@ -1221,7 +1230,6 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
drop table t1;
set @@optimizer_switch=@save_optimizer_switch;
-set @@optimizer_switch=@save_optimizer_switch;
set @@optimizer_switch='materialization=off';
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -1255,6 +1263,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Y ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
2 DEPENDENT SUBQUERY A ALL NULL NULL NULL NULL 10 Using where
2 DEPENDENT SUBQUERY B ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
+#
+# SJ-Materialization scan + first table being system const table
+#
create table t0 (a int, b int);
insert into t0 values(1,1);
explain select * from t0, t3 where t3.a in (select a from t2) and (t3.a < 10 or t3.a >30);
@@ -1269,6 +1280,10 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t4 ALL NULL NULL NULL NULL 10 Using where; Start temporary
1 PRIMARY t3 ref a a 5 test.t4.x 10 Using where; End temporary
drop table t0,t1,t2,t3,t4;
+#
+# LooseScan with ref access
+#
+set @@optimizer_switch='join_cache_hashed=off';
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, filler char(100), key(a,b));
@@ -1305,6 +1320,9 @@ set @@optimizer_search_depth=@save_optimizer_search_depth;
set @@optimizer_switch=@save_optimizer_switch;
drop table t0, t1, t2;
set @@optimizer_switch='materialization=off';
+#
+# Primitive SJ-Materialization tests for DECIMAL and DATE
+#
create table t0 (a decimal(4,2));
insert into t0 values (10.24), (22.11);
create table t1 as select * from t0;
@@ -1336,6 +1354,9 @@ insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 as select a as a, a as b, a as c from t0 where a < 3;
create table t2 as select a as a, a as b from t0 where a < 3;
insert into t2 select * from t2;
+select count(*) from t2;
+count(*)
+6
explain select * from t1 where (a,b,c) in (select X.a, Y.a, Z.a from t2 X, t2 Y, t2 Z where X.b=33);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
@@ -1391,7 +1412,7 @@ drop table t0, t1;
create table t1 (
idIndividual int primary key
);
-insert into t1 values (1),(2);
+insert into t1 values (1),(2),(1000);
create table t2 (
idContact int primary key,
contactType int,
@@ -1403,7 +1424,7 @@ idAddress int primary key,
idContact int,
postalStripped varchar(100)
);
-insert into t3 values (1,1, 'foo'), (2,2,'bar');
+insert into t3 values (1,1, 'foo'), (2,2,'T2H3B2');
The following must be converted to a semi-join:
set @save_optimizer_switch=@@optimizer_switch;
set @@optimizer_switch='materialization=off';
@@ -1416,9 +1437,17 @@ WHERE cona.postalStripped='T2H3B2'
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY cona ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary
1 PRIMARY c eq_ref PRIMARY PRIMARY 4 test.cona.idContact 1 100.00 Using where
-1 PRIMARY a eq_ref PRIMARY PRIMARY 4 test.c.idObj 1 100.00 Using index; End temporary
+1 PRIMARY a eq_ref PRIMARY PRIMARY 4 test.c.idObj 1 50.00 Using index; End temporary
Warnings:
Note 1003 select `test`.`a`.`idIndividual` AS `idIndividual` from `test`.`t1` `a` semi join (`test`.`t3` `cona` join `test`.`t2` `c`) where `test`.`cona`.`postalStripped` = 'T2H3B2' and `test`.`a`.`idIndividual` = `test`.`c`.`idObj` and `test`.`c`.`idContact` = `test`.`cona`.`idContact`
+SELECT a.idIndividual FROM t1 a
+WHERE a.idIndividual IN
+( SELECT c.idObj FROM t3 cona
+INNER JOIN t2 c ON c.idContact=cona.idContact
+WHERE cona.postalStripped='T2H3B2'
+ );
+idIndividual
+2
set @@optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
#
diff --git a/mysql-test/main/subselect3_jcl6.result b/mysql-test/main/subselect3_jcl6.result
index 9df821e07dc..acd1269d875 100644
--- a/mysql-test/main/subselect3_jcl6.result
+++ b/mysql-test/main/subselect3_jcl6.result
@@ -99,10 +99,10 @@ explain extended
select oref, a, a in (select a from t1 where oref=t2.oref) Z from t2;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 100.00
-2 DEPENDENT SUBQUERY t1 ALL a NULL NULL NULL 8 100.00 Using where
+2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 3 100.00 Using where; Full scan on NULL key
Warnings:
Note 1276 Field or reference 'test.t2.oref' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t2`.`oref` AS `oref`,`test`.`t2`.`a` AS `a`,<expr_cache><`test`.`t2`.`a`,`test`.`t2`.`oref`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where `test`.`t1`.`oref` = `test`.`t2`.`oref` and trigcond(<cache>(`test`.`t2`.`a`) = `test`.`t1`.`a` or `test`.`t1`.`a` is null) having trigcond(`test`.`t1`.`a` is null)))) AS `Z` from `test`.`t2`
+Note 1003 /* select#1 */ select `test`.`t2`.`oref` AS `oref`,`test`.`t2`.`a` AS `a`,<expr_cache><`test`.`t2`.`a`,`test`.`t2`.`oref`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a checking NULL where `test`.`t1`.`oref` = `test`.`t2`.`oref` having trigcond(`test`.`t1`.`a` is null))))) AS `Z` from `test`.`t2`
flush status;
select oref, a from t2 where a in (select a from t1 where oref=t2.oref);
oref a
@@ -196,7 +196,7 @@ t3.a in (select t1.a from t1, t2 where t1.b=t2.a and t2.b=t3.oref) Z
from t3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 3 100.00
-2 DEPENDENT SUBQUERY t1 ref a a 4 func 2 100.00 Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY t1 ref a a 4 func 1 100.00 Using where; Full scan on NULL key
2 DEPENDENT SUBQUERY t2 ref a a 4 test.t1.b 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
Warnings:
Note 1276 Field or reference 'test.t3.oref' of SELECT #2 was resolved in SELECT #1
@@ -281,6 +281,8 @@ a b oref Z
NULL 1 100 0
NULL 2 100 NULL
drop table t1,t2,t3,t4;
+# More tests for tricky multi-column cases, where some of pushed-down
+# equalities are used for index lookups and some are not.
create table t1 (oref char(4), grp int, ie1 int, ie2 int);
insert into t1 (oref, grp, ie1, ie2) values
('aa', 10, 2, 1),
@@ -620,10 +622,18 @@ cc 2 0
cc NULL NULL
aa 1 1
bb NULL NULL
+explain select oref, a from t2 where a in (select ie from t1 where oref=t2.oref);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 6 Using where
+1 PRIMARY t2 hash_ALL NULL #hash#$hj 10 test.t1.oref,test.t1.ie 7 Using where; Using join buffer (flat, BNLH join)
select oref, a from t2 where a in (select ie from t1 where oref=t2.oref);
oref a
-aa 1
cc 5
+aa 1
+explain select oref, a from t2 where a not in (select ie from t1 where oref=t2.oref);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 7 Using where
+2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 Using where; Full scan on NULL key
select oref, a from t2 where a not in (select ie from t1 where oref=t2.oref);
oref a
ee NULL
@@ -1128,8 +1138,8 @@ set @tmp_optimizer_switch=@@optimizer_switch;
set optimizer_switch='derived_merge=off,derived_with_keys=off';
explain select * from (select a from t0) X where a in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 11
-1 PRIMARY t1 ALL NULL NULL NULL NULL 20 Using where; FirstMatch(<derived2>); Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 11 Using where
+1 PRIMARY t1 hash_ALL NULL #hash#$hj 5 X.a 20 Using where; FirstMatch(<derived2>); Using join buffer (flat, BNLH join)
2 DERIVED t0 ALL NULL NULL NULL NULL 11
drop table t0, t1;
set optimizer_switch=@tmp_optimizer_switch;
@@ -1160,9 +1170,9 @@ set @@optimizer_switch='firstmatch=off,materialization=off';
set @@max_heap_table_size= 16384;
explain select count(*) from t0 A, t0 B, t0 C, t0 D where D.a in (select a from t1 E where a+1 < 10000 + A.a + B.a +C.a+D.a);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY A ALL NULL NULL NULL NULL 10
-1 PRIMARY B ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
-1 PRIMARY E ALL NULL NULL NULL NULL 5 Using where; Start temporary; Using join buffer (incremental, BNL join)
+1 PRIMARY E ALL NULL NULL NULL NULL 5 Using where; Start temporary
+1 PRIMARY A ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
+1 PRIMARY B ALL NULL NULL NULL NULL 10 Using join buffer (incremental, BNL join)
1 PRIMARY C ALL NULL NULL NULL NULL 10 Using where; Using join buffer (incremental, BNL join)
1 PRIMARY D hash_ALL NULL #hash#$hj 5 test.E.a 10 Using where; End temporary; Using join buffer (incremental, BNLH join)
flush status;
@@ -1183,9 +1193,8 @@ create table t3 ( a int , filler char(100), key(a));
insert into t3 select A.a + 10*B.a, 'filler' from t0 A, t0 B;
explain select * from t3 where a in (select a from t2) and (a > 5 or a < 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 ref a a 5 test.t2.a 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Start temporary
+1 PRIMARY t3 ref a a 5 test.t2.a 1 End temporary; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
select * from t3 where a in (select a from t2);
a filler
1 filler
@@ -1224,7 +1233,6 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 hash_ALL NULL #hash#$hj 6 test.t1.a 2 Using where; Start temporary; End temporary; Using join buffer (flat, BNLH join)
drop table t1;
set @@optimizer_switch=@save_optimizer_switch;
-set @@optimizer_switch=@save_optimizer_switch;
set @@optimizer_switch='materialization=off';
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -1258,6 +1266,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Y ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
2 DEPENDENT SUBQUERY A ALL NULL NULL NULL NULL 10 Using where
2 DEPENDENT SUBQUERY B ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
+#
+# SJ-Materialization scan + first table being system const table
+#
create table t0 (a int, b int);
insert into t0 values(1,1);
explain select * from t0, t3 where t3.a in (select a from t2) and (t3.a < 10 or t3.a >30);
@@ -1272,6 +1283,10 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t4 ALL NULL NULL NULL NULL 10 Using where; Start temporary
1 PRIMARY t3 ref a a 5 test.t4.x 10 Using where; End temporary; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
drop table t0,t1,t2,t3,t4;
+#
+# LooseScan with ref access
+#
+set @@optimizer_switch='join_cache_hashed=off';
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (a int, b int, filler char(100), key(a,b));
@@ -1308,14 +1323,17 @@ set @@optimizer_search_depth=@save_optimizer_search_depth;
set @@optimizer_switch=@save_optimizer_switch;
drop table t0, t1, t2;
set @@optimizer_switch='materialization=off';
+#
+# Primitive SJ-Materialization tests for DECIMAL and DATE
+#
create table t0 (a decimal(4,2));
insert into t0 values (10.24), (22.11);
create table t1 as select * from t0;
insert into t1 select * from t0;
explain select * from t0 where a in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t0 ALL NULL NULL NULL NULL 2
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where; FirstMatch(t0); Using join buffer (flat, BNL join)
+1 PRIMARY t0 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t1 hash_ALL NULL #hash#$hj 3 test.t0.a 4 Using where; FirstMatch(t0); Using join buffer (flat, BNLH join)
select * from t0 where a in (select a from t1);
a
10.24
@@ -1327,8 +1345,8 @@ create table t1 as select * from t0;
insert into t1 select * from t0;
explain select * from t0 where a in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t0 ALL NULL NULL NULL NULL 2
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where; FirstMatch(t0); Using join buffer (flat, BNL join)
+1 PRIMARY t0 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t1 hash_ALL NULL #hash#$hj 4 test.t0.a 4 Using where; FirstMatch(t0); Using join buffer (flat, BNLH join)
select * from t0 where a in (select a from t1);
a
2008-01-01
@@ -1339,6 +1357,9 @@ insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 as select a as a, a as b, a as c from t0 where a < 3;
create table t2 as select a as a, a as b from t0 where a < 3;
insert into t2 select * from t2;
+select count(*) from t2;
+count(*)
+6
explain select * from t1 where (a,b,c) in (select X.a, Y.a, Z.a from t2 X, t2 Y, t2 Z where X.b=33);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
@@ -1394,7 +1415,7 @@ drop table t0, t1;
create table t1 (
idIndividual int primary key
);
-insert into t1 values (1),(2);
+insert into t1 values (1),(2),(1000);
create table t2 (
idContact int primary key,
contactType int,
@@ -1406,7 +1427,7 @@ idAddress int primary key,
idContact int,
postalStripped varchar(100)
);
-insert into t3 values (1,1, 'foo'), (2,2,'bar');
+insert into t3 values (1,1, 'foo'), (2,2,'T2H3B2');
The following must be converted to a semi-join:
set @save_optimizer_switch=@@optimizer_switch;
set @@optimizer_switch='materialization=off';
@@ -1419,9 +1440,17 @@ WHERE cona.postalStripped='T2H3B2'
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY cona ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary
1 PRIMARY c eq_ref PRIMARY PRIMARY 4 test.cona.idContact 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 PRIMARY a eq_ref PRIMARY PRIMARY 4 test.c.idObj 1 100.00 Using index; End temporary
+1 PRIMARY a eq_ref PRIMARY PRIMARY 4 test.c.idObj 1 50.00 Using index; End temporary
Warnings:
Note 1003 select `test`.`a`.`idIndividual` AS `idIndividual` from `test`.`t1` `a` semi join (`test`.`t3` `cona` join `test`.`t2` `c`) where `test`.`cona`.`postalStripped` = 'T2H3B2' and `test`.`a`.`idIndividual` = `test`.`c`.`idObj` and `test`.`c`.`idContact` = `test`.`cona`.`idContact`
+SELECT a.idIndividual FROM t1 a
+WHERE a.idIndividual IN
+( SELECT c.idObj FROM t3 cona
+INNER JOIN t2 c ON c.idContact=cona.idContact
+WHERE cona.postalStripped='T2H3B2'
+ );
+idIndividual
+2
set @@optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
#
diff --git a/mysql-test/main/subselect4.result b/mysql-test/main/subselect4.result
index 5589272d066..29e7e11f0ba 100644
--- a/mysql-test/main/subselect4.result
+++ b/mysql-test/main/subselect4.result
@@ -265,7 +265,7 @@ EXPLAIN
SELECT * FROM t1 WHERE NULL NOT IN (SELECT t2c.i FROM t2c WHERE t2c.pk = t1.pk);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 DEPENDENT SUBQUERY t2c index_subquery it2c it2c 8 const,test.t1.pk 2 Using index; Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY t2c index_subquery it2c it2c 8 const,test.t1.pk 1 Using index; Using where; Full scan on NULL key
SELECT * FROM t1 WHERE NULL NOT IN (SELECT t2c.i FROM t2c WHERE t2c.pk = t1.pk);
pk i
SELECT * FROM t1 WHERE NULL IN (SELECT t2c.i FROM t2c WHERE t2c.pk = t1.pk) IS UNKNOWN;
@@ -335,7 +335,7 @@ EXPLAIN
SELECT * FROM t1 WHERE (NULL, 1) NOT IN (SELECT t2c.i, t2c.pk FROM t2c WHERE t2c.pk = t1.pk);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 DEPENDENT SUBQUERY t2c index_subquery it2c it2c 8 const,test.t1.pk 2 Using index; Using where; Full scan on NULL key
+2 DEPENDENT SUBQUERY t2c index_subquery it2c it2c 8 const,test.t1.pk 1 Using index; Using where; Full scan on NULL key
SELECT * FROM t1 WHERE (NULL, 1) NOT IN (SELECT t2c.i, t2c.pk FROM t2c WHERE t2c.pk = t1.pk);
pk i
0 10
@@ -714,8 +714,7 @@ WHERE ( t1.f10 ) IN ( SELECT f11 FROM t2 GROUP BY f11 ));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
2 SUBQUERY t1 ALL NULL NULL NULL NULL 2
-2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 2
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT * FROM t1
WHERE f3 = (
SELECT t1.f3 FROM t1
@@ -729,8 +728,7 @@ WHERE ( f10, f10 ) IN ( SELECT f11, f11 FROM t2 GROUP BY f11 ));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
2 SUBQUERY t1 ALL NULL NULL NULL NULL 2
-2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 8 func,func 1
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 2
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT * FROM t1
WHERE f3 = (
SELECT f3 FROM t1
@@ -1172,7 +1170,7 @@ WHERE (t2.pk = t1.pk) AND t2.pk IN (SELECT f2 FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system PRIMARY NULL NULL NULL 1
1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1
-2 DEPENDENT SUBQUERY t1 index_subquery f2 f2 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery f2 f2 4 func 1 Using index
SELECT t1.f3, MAX(t1.f2)
FROM t1, t2
WHERE (t2.pk = t1.pk) AND t2.pk IN (SELECT f2 FROM t1);
@@ -1249,7 +1247,7 @@ drop table t1, t2;
#
CREATE TABLE t1 (c1 varchar(1) DEFAULT NULL);
CREATE TABLE t2 (c1 varchar(1) DEFAULT NULL);
-INSERT INTO t2 VALUES ('k'), ('d');
+INSERT INTO t2 VALUES ('k'), ('d'),('x');
CREATE TABLE t3 (c1 varchar(1) DEFAULT NULL);
INSERT INTO t3 VALUES ('a'), ('b'), ('c');
CREATE TABLE t4 (c1 varchar(1) primary key);
@@ -1262,16 +1260,16 @@ EXPLAIN
SELECT * FROM t1 RIGHT JOIN t2 ON t1.c1 WHERE 's' IN (SELECT c1 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 Const row not found
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1)
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
SELECT * FROM t1 RIGHT JOIN t2 ON t1.c1 WHERE 's' IN (SELECT c1 FROM t2);
c1 c1
EXPLAIN
SELECT * FROM t2 LEFT JOIN t1 ON t1.c1 WHERE 's' IN (SELECT c1 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 Const row not found
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1)
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
SELECT * FROM t2 LEFT JOIN t1 ON t1.c1 WHERE 's' IN (SELECT c1 FROM t2);
c1 c1
SET optimizer_switch='materialization=on';
@@ -1279,19 +1277,18 @@ EXPLAIN
SELECT * FROM (t2 LEFT JOIN t1 ON t1.c1) LEFT JOIN t3 on t3.c1 WHERE 's' IN (SELECT c1 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 0 Const row not found
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; Start temporary; End temporary
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 3 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
SELECT * FROM (t2 LEFT JOIN t1 ON t1.c1) LEFT JOIN t3 on t3.c1 WHERE 's' IN (SELECT c1 FROM t2);
c1 c1 c1
EXPLAIN
SELECT * FROM t4 LEFT JOIN t2 ON t4.c1 WHERE 's' IN (SELECT c1 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t4 index NULL PRIMARY 3 NULL 2 Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY t4 index NULL PRIMARY 3 NULL 2 Using index; Using join buffer (flat, BNL join)
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
SELECT * FROM t4 LEFT JOIN t2 ON t4.c1 WHERE 's' IN (SELECT c1 FROM t2);
c1 c1
SET optimizer_switch=@save_optimizer_switch;
@@ -1542,7 +1539,7 @@ EXPLAIN
SELECT 'bug' FROM DUAL WHERE ( 5 ) IN ( SELECT * FROM v1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 4 const 1 Using where
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
4 UNION NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1552,7 +1549,7 @@ EXPLAIN
SELECT ( 5 ) IN ( SELECT * FROM v1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 4 const 1 Using where
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
4 UNION NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1563,7 +1560,7 @@ EXPLAIN
SELECT 'bug' FROM DUAL WHERE ( 5 ) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 5 const 1 Using where
3 DERIVED t1 system NULL NULL NULL NULL 1
4 UNION t2 system NULL NULL NULL NULL 1
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1583,7 +1580,7 @@ EXPLAIN
SELECT ( 5 ) IN ( SELECT * FROM v2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> index_subquery NULL distinct_key 5 const 2
3 DERIVED t1 system NULL NULL NULL NULL 1
4 UNION t2 system NULL NULL NULL NULL 1
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1595,7 +1592,7 @@ EXPLAIN
SELECT 'bug' FROM DUAL WHERE ( 5 ) IN ( SELECT * FROM v1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 4 const 1 Using where
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
4 UNION NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1605,7 +1602,7 @@ EXPLAIN
SELECT ( 5 ) IN ( SELECT * FROM v1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 4 const 1 Using where
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
4 UNION NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1616,7 +1613,7 @@ EXPLAIN
SELECT 'bug' FROM DUAL WHERE ( 5 ) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 5 const 1 Using where
3 DERIVED t1 system NULL NULL NULL NULL 1
4 UNION t2 system NULL NULL NULL NULL 1
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1626,7 +1623,7 @@ EXPLAIN
SELECT 'bug' FROM t3 WHERE ( 5 ) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 system NULL NULL NULL NULL 1
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> const NULL distinct_key 5 const 1 Using where
3 DERIVED t1 system NULL NULL NULL NULL 1
4 UNION t2 system NULL NULL NULL NULL 1
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -1636,7 +1633,7 @@ EXPLAIN
SELECT ( 5 ) IN ( SELECT * FROM v2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used
-2 SUBQUERY <derived3> ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <derived3> index_subquery NULL distinct_key 5 const 2
3 DERIVED t1 system NULL NULL NULL NULL 1
4 UNION t2 system NULL NULL NULL NULL 1
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -2755,12 +2752,21 @@ INSERT INTO t4 VALUES
('w'),('w'),('x'),('x'), (NULL),(NULL);
SET @save_join_cache_level=@@join_cache_level;
SET join_cache_level=0;
+explain select 1
+from t2 join t1 on
+('i','w') not in (select t1.v1,t4.v2 from t4,t1,t3 where t3.v2 = t1.v1) LIMIT ROWS EXAMINED 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2
+1 PRIMARY t1 index NULL v1 9 NULL 5 Using index
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 Using where
+2 MATERIALIZED t1 ref v1 v1 4 test.t3.v2 1 Using index
+2 MATERIALIZED t4 ALL NULL NULL NULL NULL 50
select 1
from t2 join t1 on
-('i','w') not in (select t1.v1,t4.v2 from t4,t1,t3 where t3.v2 = t1.v1) LIMIT ROWS EXAMINED 500;
+('i','w') not in (select t1.v1,t4.v2 from t4,t1,t3 where t3.v2 = t1.v1) LIMIT ROWS EXAMINED 10;
1
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 3020 rows, which exceeds LIMIT ROWS EXAMINED (500). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 14 rows, which exceeds LIMIT ROWS EXAMINED (10). The query result may be incomplete
SET join_cache_level= @save_join_cache_level;
DROP TABLE t1,t2,t3,t4;
#
@@ -2790,9 +2796,8 @@ set names 'utf8';
EXPLAIN
SELECT * FROM t2 WHERE (t2.a,t2.b) IN (('abc',1), ('def', 2));
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 5
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 func,func 1 Using where
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY <derived3> ref key1,distinct_key key1 4 test.t2.b 1 Using where; FirstMatch(t2)
3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
set names default;
set @@in_predicate_conversion_threshold= @save_in_predicate_conversion_threshold;
@@ -2902,8 +2907,8 @@ WHERE tn.key1 IN ('1','2','3','4','5','6','7','8','9','10')
);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY hist ALL NULL NULL NULL NULL 100 Using where
-2 DEPENDENT SUBQUERY tms range PRIMARY PRIMARY 32 NULL 10 Using where; Using index
-2 DEPENDENT SUBQUERY tn eq_ref PRIMARY PRIMARY 32 test.tms.key1 1 Using where
+2 DEPENDENT SUBQUERY tn range PRIMARY PRIMARY 32 NULL 10 Using index condition; Using where
+2 DEPENDENT SUBQUERY tms eq_ref PRIMARY PRIMARY 32 test.tn.key1 1 Using index
set optimizer_switch=@tmp_os;
drop table t1, t10, t11;
#
@@ -2922,6 +2927,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"duplicate_removal": {
@@ -2931,9 +2937,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -2948,6 +2956,7 @@ ANALYZE
"r_loops": 0,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -2955,9 +2964,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -2970,6 +2981,7 @@ ANALYZE
{
"query_block": {
"select_id": 3,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -2977,9 +2989,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 2,
"r_rows": 2,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -3132,10 +3146,9 @@ where b in (select c from t3
group by (select a from t1 where a = 1) in (select d from t4));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t3 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t2`.`b` AS `b` from `test`.`t2` semi join (`test`.`t3`) where 1
+Note 1003 select `test`.`t2`.`b` AS `b` from `test`.`t2` semi join (`test`.`t3`) where `test`.`t3`.`c` = `test`.`t2`.`b`
select b from t2
where b in (select c from t3
group by (select a from t1 where a = 1) in (select d from t4));
diff --git a/mysql-test/main/subselect4.test b/mysql-test/main/subselect4.test
index 827037c6d5f..5f2a91f8ca2 100644
--- a/mysql-test/main/subselect4.test
+++ b/mysql-test/main/subselect4.test
@@ -946,7 +946,7 @@ drop table t1, t2;
CREATE TABLE t1 (c1 varchar(1) DEFAULT NULL);
CREATE TABLE t2 (c1 varchar(1) DEFAULT NULL);
-INSERT INTO t2 VALUES ('k'), ('d');
+INSERT INTO t2 VALUES ('k'), ('d'),('x');
CREATE TABLE t3 (c1 varchar(1) DEFAULT NULL);
INSERT INTO t3 VALUES ('a'), ('b'), ('c');
CREATE TABLE t4 (c1 varchar(1) primary key);
@@ -2284,9 +2284,13 @@ INSERT INTO t4 VALUES
SET @save_join_cache_level=@@join_cache_level;
SET join_cache_level=0;
+explain select 1
+from t2 join t1 on
+('i','w') not in (select t1.v1,t4.v2 from t4,t1,t3 where t3.v2 = t1.v1) LIMIT ROWS EXAMINED 10;
+
select 1
from t2 join t1 on
-('i','w') not in (select t1.v1,t4.v2 from t4,t1,t3 where t3.v2 = t1.v1) LIMIT ROWS EXAMINED 500;
+('i','w') not in (select t1.v1,t4.v2 from t4,t1,t3 where t3.v2 = t1.v1) LIMIT ROWS EXAMINED 10;
SET join_cache_level= @save_join_cache_level;
DROP TABLE t1,t2,t3,t4;
diff --git a/mysql-test/main/subselect_cache.result b/mysql-test/main/subselect_cache.result
index 5c2fd3e66fc..10bb54cbb14 100644
--- a/mysql-test/main/subselect_cache.result
+++ b/mysql-test/main/subselect_cache.result
@@ -49,6 +49,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -56,9 +57,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -73,6 +76,7 @@ ANALYZE
"r_hit_ratio": 60,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 4,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -80,9 +84,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 4,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -106,6 +112,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -113,9 +120,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 10,
"r_rows": 10,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -138,6 +147,7 @@ ANALYZE
{
"query_block": {
"select_id": 3,
+ "cost": "REPLACED",
"r_loops": 4,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -145,9 +155,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 4,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -178,6 +190,7 @@ ANALYZE
"r_hit_ratio": 60,
"query_block": {
"select_id": 2,
+ "cost": "REPLACED",
"r_loops": 4,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -185,9 +198,11 @@ ANALYZE
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 4,
"rows": 4,
"r_rows": 4,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -208,12 +223,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -224,12 +242,15 @@ EXPLAIN
"state": "uninitialized",
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t2.c"
}
@@ -247,12 +268,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -269,12 +293,15 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t2.c"
}
@@ -301,12 +328,15 @@ EXPLAIN
"state": "uninitialized",
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t2",
"access_type": "ALL",
+ "loops": 1,
"rows": 4,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "t1.b = t2.c"
}
diff --git a/mysql-test/main/subselect_cache.test b/mysql-test/main/subselect_cache.test
index cf647afb994..7695b01b1ea 100644
--- a/mysql-test/main/subselect_cache.test
+++ b/mysql-test/main/subselect_cache.test
@@ -34,8 +34,10 @@ select a, (select d from t2 where b=c) from t1;
--source include/analyze-format.inc
analyze format=json
select a, (select d from t2 where b=c), (select d from t2 where b=c union select 1 order by 1 limit 1) from t1;
+--source include/explain-no-costs.inc
explain format=json
select a, (select d from t2 where b=c) from t1;
+--source include/explain-no-costs.inc
explain format=json
select a, (select d from t2 where b=c), (select d from t2 where b=c union select 1 order by 1 limit 1) from t1;
set optimizer_switch='subquery_cache=off';
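The two --source include/explain-no-costs.inc lines added above mask the optimizer's new cost members so that EXPLAIN FORMAT=JSON results stay stable while the cost model changes. The include itself is not shown in this diff; a minimal sketch of the masking it presumably performs (an assumption — the real include/explain-no-costs.inc may differ) is a replace_regex sourced directly before the EXPLAIN it should mask:

--replace_regex /("cost": )[0-9.e+-]+/\1"COST_REPLACED"/
explain format=json
select a, (select d from t2 where b=c) from t1;

Since --replace_regex only applies to the next command, such an include has to be sourced once per EXPLAIN FORMAT=JSON statement, which matches how it is used in the hunk above.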
diff --git a/mysql-test/main/subselect_exists2in.result b/mysql-test/main/subselect_exists2in.result
index 6ff518b5a29..051003a8df8 100644
--- a/mysql-test/main/subselect_exists2in.result
+++ b/mysql-test/main/subselect_exists2in.result
@@ -51,8 +51,8 @@ c
explain extended
SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t3 WHERE t3.b = t1.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 index aa aa 4 NULL 2 100.00 Using index
-1 PRIMARY t3 ALL bb NULL NULL NULL 2 100.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t1 index aa aa 4 NULL 2 100.00 Using where; Using index
+1 PRIMARY t3 ref bb bb 4 test.t1.a 1 100.00 FirstMatch(t1)
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t3`) where `test`.`t3`.`b` = `test`.`t1`.`a`
@@ -65,10 +65,10 @@ explain extended
SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t3 WHERE t3.b = t1.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL aa 4 NULL 2 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t3 ALL bb NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY t3 index_subquery bb bb 4 func 1 100.00
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#2 */ select `test`.`t3`.`b` from `test`.`t3` where <cache>(`test`.`t1`.`a`) = `test`.`t3`.`b`))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in t3 on bb)))
-- EXIST2IN then MATERIALIZATION
set optimizer_switch='exists_to_in=on,in_to_exists=off,semijoin=off,materialization=on,subquery_cache=off';
SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t3 WHERE t3.b = t1.a);
@@ -91,7 +91,7 @@ explain extended
SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t3 WHERE t3.b = t1.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL aa 4 NULL 2 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t3 ALL bb NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY t3 ref bb bb 4 test.t1.a 1 100.00
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where exists(/* select#2 */ select `test`.`t3`.`a` from `test`.`t3` where `test`.`t3`.`b` = `test`.`t1`.`a` limit 1)
@@ -297,23 +297,28 @@ d a b e412 e412 h412
d b a i421 i421 l421
d b b m422 m422 o422
drop table t1, t2, t3;
+#
+# LP BUG#901835 - incorrect semi-join conversion after exists2in
+#
CREATE TABLE t1 ( a INT );
-INSERT INTO t1 VALUES (7),(0);
+INSERT INTO t1 VALUES (7),(0),(100);
CREATE TABLE t2 ( b INT );
-INSERT INTO t2 VALUES (0),(8);
+INSERT INTO t2 VALUES (0),(8),(1000),(2000),(3000),(4000),(5000);
+insert into t2 select seq from seq_6000_to_6100;
SELECT * FROM t1 WHERE
EXISTS ( SELECT * FROM t2 WHERE b = a )
OR a > 0;
a
7
0
+100
explain extended
SELECT * FROM t1 WHERE
EXISTS ( SELECT * FROM t2 WHERE b = a )
OR a > 0;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 108 100.00
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`b` from `test`.`t2` where 1 ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`b`)))) or `test`.`t1`.`a` > 0
@@ -386,7 +391,7 @@ explain extended
SELECT * FROM t1 WHERE EXISTS ( SELECT * FROM t3 WHERE t3.b = t1.a and t3.b1 = t1.a1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 index bb bb 8 NULL 2 100.00 Using where; Using index; LooseScan
-1 PRIMARY t1 ref aa aa 8 test.t3.b,test.t3.b1 2 100.00 Using index
+1 PRIMARY t1 ref aa aa 8 test.t3.b,test.t3.b1 1 50.00 Using index
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a1' of SELECT #2 was resolved in SELECT #1
@@ -400,7 +405,7 @@ explain extended
SELECT * FROM t1 WHERE EXISTS ( SELECT * FROM t3 WHERE t3.b = t1.a and t3.b1 = t1.a1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL aa 8 NULL 2 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t3 index_subquery bb bb 8 func,func 2 100.00 Using index; Using where
+2 DEPENDENT SUBQUERY t3 index_subquery bb bb 8 func,func 1 100.00 Using index; Using where
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a1' of SELECT #2 was resolved in SELECT #1
@@ -428,7 +433,7 @@ explain extended
SELECT * FROM t1 WHERE EXISTS ( SELECT * FROM t3 WHERE t3.b = t1.a and t3.b1 = t1.a1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL aa 8 NULL 2 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t3 ref bb bb 8 test.t1.a,test.t1.a1 2 100.00 Using index
+2 DEPENDENT SUBQUERY t3 ref bb bb 8 test.t1.a,test.t1.a1 1 100.00 Using index
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a1' of SELECT #2 was resolved in SELECT #1
@@ -591,7 +596,7 @@ SELECT * FROM t1 AS alias
WHERE EXISTS ( SELECT * FROM t1 WHERE a > alias.a AND a = alias.b );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY alias ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(alias); Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch(alias); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.alias.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.alias.b' of SELECT #2 was resolved in SELECT #1
@@ -622,7 +627,7 @@ SELECT * FROM t1 AS alias
WHERE EXISTS ( SELECT * FROM t1 WHERE a > alias.a AND a = alias.b );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY alias ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary; End temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 50.00 Using where; Start temporary; End temporary
Warnings:
Note 1276 Field or reference 'test.alias.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.alias.b' of SELECT #2 was resolved in SELECT #1
@@ -713,14 +718,15 @@ set optimizer_switch='exists_to_in=on';
# correct calculation of reserved items (postreview-fix)
#
create table t1 (col1 int, col2 int, col3 int);
-insert into t1 values (1,2,3),(2,3,4),(4,5,6);
+insert into t1 values (1,2,3),(2,3,4),(4,5,6),(7,8,9);
create table t2 as select * from t1;
+insert into t2 select seq,seq,seq from seq_1000_to_1200;
explain extended
select * from t1 where exists (select col2 from t2 where t2.col1=t1.col1 and t2.col2=t1.col2);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 205 100.00
Warnings:
Note 1276 Field or reference 'test.t1.col1' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.col2' of SELECT #2 was resolved in SELECT #1
@@ -730,6 +736,7 @@ col1 col2 col3
1 2 3
2 3 4
4 5 6
+7 8 9
drop table t1,t2;
#
# MDEV-3879: Exists2In: Wrong result (extra row) and unexpected
@@ -902,9 +909,8 @@ WHERE EXISTS ( SELECT * FROM t1 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT SUBQUERY <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
+3 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch
3 DEPENDENT SUBQUERY sq2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
-4 MATERIALIZED t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1276 Field or reference 'sq1.pk' of SELECT #3 was resolved in SELECT #1
Note 1276 Field or reference 'sq1.f1' of SELECT #3 was resolved in SELECT #1
@@ -922,9 +928,8 @@ WHERE EXISTS ( SELECT * FROM t1 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT SUBQUERY <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
+3 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch
3 DEPENDENT SUBQUERY sq2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
-4 MATERIALIZED t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1276 Field or reference 'sq1.pk' of SELECT #3 was resolved in SELECT #1
Note 1276 Field or reference 'sq1.f1' of SELECT #3 was resolved in SELECT #1
diff --git a/mysql-test/main/subselect_exists2in.test b/mysql-test/main/subselect_exists2in.test
index 8ad89be2b65..625c007e006 100644
--- a/mysql-test/main/subselect_exists2in.test
+++ b/mysql-test/main/subselect_exists2in.test
@@ -1,4 +1,5 @@
--source include/default_optimizer_switch.inc
+--source include/have_sequence.inc
--disable_warnings
drop table if exists t1,t2,t3;
@@ -238,13 +239,14 @@ group by a1,a2,b;
drop table t1, t2, t3;
-#
-# LP BUG#901835 - incorrect semi-join conversion after exists2in
-#
+--echo #
+--echo # LP BUG#901835 - incorrect semi-join conversion after exists2in
+--echo #
CREATE TABLE t1 ( a INT );
-INSERT INTO t1 VALUES (7),(0);
+INSERT INTO t1 VALUES (7),(0),(100);
CREATE TABLE t2 ( b INT );
-INSERT INTO t2 VALUES (0),(8);
+INSERT INTO t2 VALUES (0),(8),(1000),(2000),(3000),(4000),(5000);
+insert into t2 select seq from seq_6000_to_6100;
SELECT * FROM t1 WHERE
EXISTS ( SELECT * FROM t2 WHERE b = a )
@@ -571,8 +573,10 @@ set optimizer_switch='exists_to_in=on';
--echo # correct calculation of reserved items (postreview-fix)
--echo #
create table t1 (col1 int, col2 int, col3 int);
-insert into t1 values (1,2,3),(2,3,4),(4,5,6);
+insert into t1 values (1,2,3),(2,3,4),(4,5,6),(7,8,9);
create table t2 as select * from t1;
+insert into t2 select seq,seq,seq from seq_1000_to_1200;
+
explain extended
select * from t1 where exists (select col2 from t2 where t2.col1=t1.col1 and t2.col2=t1.col2);
select * from t1 where exists (select col2 from t2 where t2.col1=t1.col1 and t2.col2=t1.col2);
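The hunk above also converts the plain # comment around LP BUG#901835 into --echo lines; in mysqltest, # comments are visible only in the .test file, whereas --echo writes its text into the .result file as well, which is why the bug header now appears in subselect_exists2in.result. A minimal illustration of the difference:

# this line is a test-file comment and leaves no trace in the result file
--echo # this line is copied verbatim into the .result file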
diff --git a/mysql-test/main/subselect_exists2in_costmat.result b/mysql-test/main/subselect_exists2in_costmat.result
index 1c9574aafd3..6aecfeafb65 100644
--- a/mysql-test/main/subselect_exists2in_costmat.result
+++ b/mysql-test/main/subselect_exists2in_costmat.result
@@ -37,8 +37,6 @@ create index Language on CountryLanguage(Language);
create index CityName on City(Name);
alter table City change population population int(11) null default 0;
select max(id) from City into @max_city_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into City values (@max_city_id + 1,'Kilifarevo','BGR',NULL);
SELECT COUNT(*) FROM Country;
COUNT(*)
diff --git a/mysql-test/main/subselect_exists2in_costmat.test b/mysql-test/main/subselect_exists2in_costmat.test
index 371f0936d1a..dd3890496f5 100644
--- a/mysql-test/main/subselect_exists2in_costmat.test
+++ b/mysql-test/main/subselect_exists2in_costmat.test
@@ -67,6 +67,7 @@ set @@optimizer_switch = 'exists_to_in=on,in_to_exists=on,semijoin=on,materializ
-- echo Q1.1m:
-- echo MATERIALIZATION: there are too many rows in the outer query
-- echo to be looked up in the inner table.
+
EXPLAIN
SELECT Name FROM Country
WHERE (EXISTS (select 1 from City where City.Population > 100000 and
diff --git a/mysql-test/main/subselect_extra.result b/mysql-test/main/subselect_extra.result
index c654fdfca13..247b36dbf49 100644
--- a/mysql-test/main/subselect_extra.result
+++ b/mysql-test/main/subselect_extra.result
@@ -68,7 +68,7 @@ select * from t1
where id in (select id from t1 as x1 where (t1.cur_date is null));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY x1 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY x1 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t1.cur_date' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`cur_date` AS `cur_date` from `test`.`t1` semi join (`test`.`t1` `x1`) where `test`.`x1`.`id` = `test`.`t1`.`id` and `test`.`t1`.`cur_date` = 0
@@ -80,7 +80,7 @@ select * from t2
where id in (select id from t2 as x1 where (t2.cur_date is null));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY x1 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
+1 PRIMARY x1 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t2.cur_date' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t2`.`id` AS `id`,`test`.`t2`.`cur_date` AS `cur_date` from `test`.`t2` semi join (`test`.`t2` `x1`) where `test`.`x1`.`id` = `test`.`t2`.`id` and `test`.`t2`.`cur_date` = 0
@@ -393,7 +393,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SELECT * FROM v2;
a b
@@ -413,7 +413,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.a,test.t1.b 1 FirstMatch(t1)
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 10 test.t1.a,test.t1.b 1
3 DERIVED t2 ALL NULL NULL NULL NULL 6
4 UNION t3 ALL NULL NULL NULL NULL 4
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -451,8 +451,8 @@ WHERE t3.b IN (SELECT v1.b FROM v1, t2
WHERE t2.c = v1.c AND t2.c = v1.b AND v1.b = t3.c);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
-1 PRIMARY <derived3> ref key1 key1 8 const,const 0 Start temporary
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY <derived3> ref key1 key1 8 const,const 0 FirstMatch(t3)
3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using where
SELECT * FROM t3
WHERE t3.b IN (SELECT v1.b FROM v1, t2
@@ -474,7 +474,7 @@ EXPLAIN
SELECT * FROM t1 WHERE t1.b IN (SELECT v2.a FROM v2 WHERE v2.b = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-1 PRIMARY <derived3> ref key0 key0 10 test.t1.b,test.t1.a 2 FirstMatch(t1)
+1 PRIMARY <derived3> ref key0 key0 10 test.t1.b,test.t1.a 1 FirstMatch(t1)
3 DERIVED t2 ALL NULL NULL NULL NULL 2
SELECT * FROM t1 WHERE t1.b IN (SELECT v2.a FROM v2 WHERE v2.b = t1.a);
a b
diff --git a/mysql-test/main/subselect_extra_no_semijoin.result b/mysql-test/main/subselect_extra_no_semijoin.result
index faeaf75c590..ead53edf311 100644
--- a/mysql-test/main/subselect_extra_no_semijoin.result
+++ b/mysql-test/main/subselect_extra_no_semijoin.result
@@ -395,7 +395,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 2 Using where
+2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 1 Using where
3 DERIVED t2 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
SELECT * FROM v2;
a b
@@ -415,7 +415,7 @@ EXPLAIN
SELECT a FROM t1 WHERE (a,b) IN (SELECT * FROM v2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <derived3> index_subquery key0 key0 10 func,func 1 Using where
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 10 func,func 1 Using where
3 DERIVED t2 ALL NULL NULL NULL NULL 6
4 UNION t3 ALL NULL NULL NULL NULL 4
NULL UNION RESULT <union3,4> ALL NULL NULL NULL NULL NULL
@@ -476,7 +476,7 @@ EXPLAIN
SELECT * FROM t1 WHERE t1.b IN (SELECT v2.a FROM v2 WHERE v2.b = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 5 test.t1.a 2 Using where
+2 DEPENDENT SUBQUERY <derived3> ref key0 key0 5 test.t1.a 1 Using where
3 DERIVED t2 ALL NULL NULL NULL NULL 2
SELECT * FROM t1 WHERE t1.b IN (SELECT v2.a FROM v2 WHERE v2.b = t1.a);
a b
diff --git a/mysql-test/main/subselect_firstmatch.result b/mysql-test/main/subselect_firstmatch.result
new file mode 100644
index 00000000000..86a3dd1bada
--- /dev/null
+++ b/mysql-test/main/subselect_firstmatch.result
@@ -0,0 +1,30 @@
+#
+# Check that firstmatch works with HASH
+#
+create table t1 (a int, b int);
+insert into t1 select seq, seq from seq_1_to_10;
+create table t2 (a int, b int);
+insert into t2 select A.seq,A.seq from seq_1_to_10 A, seq_1_to_10 B;
+set @save_join_cache_level=@@join_cache_level;
+set join_cache_level=6;
+explain select * from t1 where t1.a in (select t2.a from t2 where t1.b=t2.b);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 10 Using where
+1 PRIMARY t2 hash_ALL NULL #hash#$hj 10 test.t1.a,test.t1.b 100 Using where; FirstMatch(t1); Using join buffer (flat, BNLH join)
+select * from t1 where t1.a in (select t2.a from t2 where t1.b=t2.b);
+a b
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+set @@join_cache_level=@save_join_cache_level;
+drop table t1,t2;
+#
+# End of 11.0 tests
+#
diff --git a/mysql-test/main/subselect_firstmatch.test b/mysql-test/main/subselect_firstmatch.test
new file mode 100644
index 00000000000..54da8bf0548
--- /dev/null
+++ b/mysql-test/main/subselect_firstmatch.test
@@ -0,0 +1,28 @@
+#
+# Tests for semijoins that do not need to be run for a lot of option combinations
+#
+--source include/have_sequence.inc
+
+--echo #
+--echo # Check that firstmatch works with HASH
+--echo #
+
+create table t1 (a int, b int);
+insert into t1 select seq, seq from seq_1_to_10;
+
+create table t2 (a int, b int);
+insert into t2 select A.seq,A.seq from seq_1_to_10 A, seq_1_to_10 B;
+
+set @save_join_cache_level=@@join_cache_level;
+set join_cache_level=6;
+
+explain select * from t1 where t1.a in (select t2.a from t2 where t1.b=t2.b);
+select * from t1 where t1.a in (select t2.a from t2 where t1.b=t2.b);
+
+set @@join_cache_level=@save_join_cache_level;
+
+drop table t1,t2;
+
+--echo #
+--echo # End of 11.0 tests
+--echo #
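This new test checks the FirstMatch semi-join strategy when the inner table is accessed through a hashed join buffer — the hash_ALL / #hash#$hj access with "FirstMatch(t1); Using join buffer (flat, BNLH join)" shown in the .result file above. A quick cross-check that the strategy only changes the plan, not the result set (a sketch, assuming the t1/t2 created in the test above and the firstmatch optimizer_switch flag):

set @save_os=@@optimizer_switch;
set optimizer_switch='firstmatch=off';
select * from t1 where t1.a in (select t2.a from t2 where t1.b=t2.b);
set optimizer_switch=@save_os;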
diff --git a/mysql-test/main/subselect_innodb.result b/mysql-test/main/subselect_innodb.result
index 242b01f8955..94f6223157b 100644
--- a/mysql-test/main/subselect_innodb.result
+++ b/mysql-test/main/subselect_innodb.result
@@ -314,7 +314,7 @@ EXPLAIN SELECT 1 FROM t1 WHERE NOT EXISTS
(SELECT 1 FROM t2 WHERE d = (SELECT d FROM t2 WHERE a >= 1) ORDER BY d);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 1 Using where
-2 DEPENDENT SUBQUERY t2 unique_subquery PRIMARY,d PRIMARY 1 func 1 Using where
+2 DEPENDENT SUBQUERY t2 unique_subquery PRIMARY,d d 2 func 1 Using index; Using where
3 DEPENDENT SUBQUERY t2 index NULL d 2 NULL 1 Using index
DROP TABLE t2;
CREATE TABLE t2 (b INT, c INT, UNIQUE KEY (b), UNIQUE KEY (b, c )) ENGINE=INNODB;
@@ -462,7 +462,7 @@ EXPLAIN
SELECT * FROM t1 WHERE EXISTS ( SELECT b FROM t2, t3 GROUP BY b HAVING b != 3 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 index NULL PRIMARY 4 NULL 1 Using index; Using temporary
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 1 Using temporary
2 SUBQUERY t3 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
SELECT * FROM t1 WHERE EXISTS ( SELECT b FROM t2, t3 GROUP BY b HAVING b != 3 );
a
@@ -560,6 +560,7 @@ id select_type table type possible_keys key key_len ref rows Extra
#
# MDEV-6081: ORDER BY+ref(const): selectivity is very incorrect (MySQL Bug#14338686)
#
+insert into t2 select seq,seq,seq from seq_10000_to_11000;
alter table t2 add key2 int;
update t2 set key2=key1;
alter table t2 add key(key2);
@@ -580,6 +581,25 @@ t1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL #
2 DEPENDENT SUBQUERY t2 ref key1 key1 5 test.t1.a # Using where; Using filesort
+select
+(SELECT
+concat(id, '-', key1, '-', col1)
+FROM t2
+WHERE t2.key1 = t1.a
+ORDER BY t2.key2 ASC LIMIT 1) as subq
+from
+t1;
+subq
+100-0-123456
+101-1-123456
+102-2-123456
+103-3-123456
+104-4-123456
+105-5-123456
+106-6-123456
+107-7-123456
+108-8-123456
+109-9-123456
drop table t1,t2;
#
# MDEV-12931: semi-join in ON expression of STRAIGHT_JOIN
@@ -612,11 +632,10 @@ INNER JOIN
ON ( 1 IN ( SELECT f4 FROM t4 ) ) )
ON ( f1 >= f2 );
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 50.00 Using where; FirstMatch
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (flat, BNL join)
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (incremental, BNL join)
-3 MATERIALIZED t4 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t2`.`f2` AS `f2`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` join `test`.`t2` semi join (`test`.`t4`) join `test`.`t3` where `test`.`t4`.`f4` = 1 and `test`.`t1`.`f1` >= `test`.`t2`.`f2`
DROP TABLE t1,t2,t3,t4;
diff --git a/mysql-test/main/subselect_innodb.test b/mysql-test/main/subselect_innodb.test
index e354ddc4496..840e0adf723 100644
--- a/mysql-test/main/subselect_innodb.test
+++ b/mysql-test/main/subselect_innodb.test
@@ -1,5 +1,6 @@
-- source include/no_valgrind_without_big.inc
-- source include/have_innodb.inc
+-- source include/have_sequence.inc
# Note: the test uses only non-semijoin subqueries, so semi-join switch
# settings are not relevant.
@@ -568,6 +569,11 @@ from
--echo # MDEV-6081: ORDER BY+ref(const): selectivity is very incorrect (MySQL Bug#14338686)
--echo #
+
+# Table t2 has 100 identical values per key value, which causes the optimizer to prefer an index scan instead of ref
+# Fix this by adding more distinct values to key1
+insert into t2 select seq,seq,seq from seq_10000_to_11000;
+
alter table t2 add key2 int;
update t2 set key2=key1;
alter table t2 add key(key2);
@@ -583,6 +589,14 @@ explain select
ORDER BY t2.key2 ASC LIMIT 1)
from
t1;
+select
+ (SELECT
+ concat(id, '-', key1, '-', col1)
+ FROM t2
+ WHERE t2.key1 = t1.a
+ ORDER BY t2.key2 ASC LIMIT 1) as subq
+from
+ t1;
drop table t1,t2;
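The comment added to subselect_innodb.test spells out the reasoning: when key1 has only a handful of distinct values, a ref lookup is estimated to return on the order of a hundred rows, so the optimizer may prefer an index scan; adding rows with distinct key1 values brings the per-lookup estimate back down. A self-contained sketch of that effect on the estimates, using a hypothetical table (it assumes the sequence engine, as the test does):

create table tcard (id int primary key, key1 int, col1 int, key(key1));
# only 10 distinct key1 values: roughly 100 rows per value, so an indexed lookup looks expensive
insert into tcard select seq, seq % 10, 123456 from seq_1_to_1000;
analyze table tcard;
explain select * from tcard where key1 = 5;
# with mostly distinct key1 values the estimated rows per lookup drops sharply
insert into tcard select seq, seq, seq from seq_10000_to_11000;
analyze table tcard;
explain select * from tcard where key1 = 5;
drop table tcard;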
diff --git a/mysql-test/main/subselect_mat.result b/mysql-test/main/subselect_mat.result
index 25465fe650a..a8cad01c674 100644
--- a/mysql-test/main/subselect_mat.result
+++ b/mysql-test/main/subselect_mat.result
@@ -1142,7 +1142,7 @@ a
explain extended
select a from t1 group by a having a in (select c from t2 where d >= 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 range NULL it1a 4 NULL 8 100.00 Using index for group-by
+1 PRIMARY t1 range NULL it1a 4 NULL 7 100.00 Using index for group-by
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 7 100.00 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` group by `test`.`t1`.`a` having <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`c` from `test`.`t2` where `test`.`t2`.`d` >= 20 ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`c`))))
@@ -1154,7 +1154,7 @@ create index iab on t1(a, b);
explain extended
select a from t1 group by a having a in (select c from t2 where d >= 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 range NULL it1a 4 NULL 8 100.00 Using index for group-by
+1 PRIMARY t1 range NULL it1a 4 NULL 7 100.00 Using index for group-by
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 7 100.00 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` group by `test`.`t1`.`a` having <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`c` from `test`.`t2` where `test`.`t2`.`d` >= 20 ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`c`))))
@@ -1166,7 +1166,7 @@ explain extended
select a from t1 group by a
having a in (select c from t2 where d >= some(select e from t3 where max(b)=e));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 range NULL iab 4 NULL 8 100.00 Using index for group-by
+1 PRIMARY t1 range NULL iab 4 NULL 7 100.00 Using index for group-by
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 7 100.00 Using where
3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
@@ -1510,13 +1510,15 @@ SET @@optimizer_switch='semijoin=on,materialization=on';
EXPLAIN SELECT COUNT(*) FROM t1 WHERE (f1,f2) IN (SELECT f1,f2 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 7 func,func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT COUNT(*) FROM t1 WHERE (f1,f2) IN (SELECT f1,f2 FROM t2);
COUNT(*)
2
set @@optimizer_switch= @local_optimizer_switch;
DROP TABLE t1, t2;
+#
+# BUG#46548 IN-subqueries return 0 rows with materialization=on
+#
CREATE TABLE t1 (
pk int,
a varchar(1),
@@ -1526,16 +1528,19 @@ d varchar(4),
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo'),(2,'f','ffff','ffff','ffff');
+insert into t1 select seq,'x','xxxx','xxxx','xxxx' from seq_10_to_40;
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii'),(2,'f','ffff','ffff','ffff');
+insert into t2 select -seq,'a','aaaa','aaaa','aaaa' from seq_1_to_20;
+insert into t2 select seq,'b','bbbb','bbbb','bbbb' from seq_100_to_200;
set @local_optimizer_switch=@@optimizer_switch;
set @@optimizer_switch=@optimizer_switch_local_default;
SET @@optimizer_switch='semijoin=on,materialization=on';
EXPLAIN SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2
+1 PRIMARY t1 ALL NULL NULL NULL NULL 33
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using where; Rowid-ordered scan
+2 MATERIALIZED t2 ALL PRIMARY NULL NULL NULL 123 Using where
SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
pk
2
@@ -1890,19 +1895,20 @@ WHERE alias4.c = alias3.b
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
3 MATERIALIZED alias3 ALL NULL NULL NULL NULL 2 Using where
-3 MATERIALIZED alias4 ref c c 11 test.alias3.b 2 Using where; Using index
+3 MATERIALIZED alias4 ref c c 11 test.alias3.b 1 Using where; Using index
DROP TABLE t1,t2;
#
# BUG#928048: Query containing IN subquery with OR in the where clause returns a wrong result
#
create table t1 (a int, b int);
insert into t1 values (7,5), (3,3), (5,4), (9,3);
+insert into t1 select seq,seq from seq_100_to_200;
create table t2 (a int, b int, index i_a(a));
insert into t2 values
(4,2), (7,9), (7,4), (3,1), (5,3), (3,1), (9,4), (8,1);
explain select * from t1 where t1.a in (select a from t2 where t2.a=7 or t2.b<=1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 105 Using where
2 MATERIALIZED t2 ALL i_a NULL NULL NULL 8 Using where
select * from t1 where t1.a in (select a from t2 where t2.a=7 or t2.b<=1);
a b
@@ -2194,9 +2200,8 @@ mysqltest1
EXPLAIN EXTENDED
SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort
-1 PRIMARY t1 eq_ref db db 764 information_schema.schemata.SCHEMA_NAME 1 100.00 Using where; Using index
-2 MATERIALIZED schemata ALL NULL NULL NULL NULL NULL NULL
+1 PRIMARY t1 index db db 764 NULL 4 100.00 Using index; Using temporary; Using filesort
+1 PRIMARY schemata ALL NULL NULL NULL NULL NULL NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`db` AS `db` from `test`.`t1` semi join (`information_schema`.`schemata`) where `test`.`t1`.`db` = `information_schema`.`schemata`.`SCHEMA_NAME` order by `test`.`t1`.`db` desc
drop table t1;
@@ -2228,8 +2233,10 @@ drop table t1;
CREATE TABLE t1 (
pk INT, f1 INT NOT NULL, f2 VARCHAR(3), f3 INT NULL, PRIMARY KEY(pk)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,1,'foo',8), (2,5,'bar',7);
+create table t2 like t1;
+insert into t2 select * from t1;
SELECT sq1.f2 FROM t1 AS sq1
-WHERE EXISTS ( SELECT * FROM t1 AS sq2
+WHERE EXISTS ( SELECT * FROM t2 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
f2
foo
@@ -2241,18 +2248,17 @@ WHERE EXISTS ( SELECT * FROM t1 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY sq1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where; FirstMatch
2 DEPENDENT SUBQUERY sq2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-3 MATERIALIZED t1 ALL NULL NULL NULL NULL 2
# this checks the result set above
set optimizer_switch= 'materialization=off,semijoin=off';
SELECT sq1.f2 FROM t1 AS sq1
-WHERE EXISTS ( SELECT * FROM t1 AS sq2
+WHERE EXISTS ( SELECT * FROM t2 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
f2
foo
set optimizer_switch= @local_optimizer_switch;
-DROP TABLE t1;
+DROP TABLE t1,t2;
#
# MDEV-12145: IN subquery used in WHERE of EXISTS subquery
#
@@ -2275,10 +2281,9 @@ WHERE EXISTS ( SELECT * FROM t2, t3
WHERE i3 = i2 AND f1 IN ( SELECT f3 FROM t3 ) );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
+2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 8 12.50 Using where; FirstMatch
2 DEPENDENT SUBQUERY t2 range i2 i2 5 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY t3 ref i3 i3 5 test.t2.i2 2 100.00 Using index
-3 MATERIALIZED t3 ALL NULL NULL NULL NULL 8 100.00
+2 DEPENDENT SUBQUERY t3 ref i3 i3 5 test.t2.i2 1 100.00 Using index
Warnings:
Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`f1` AS `f1` from `test`.`t1` where <expr_cache><`test`.`t1`.`f1`>(exists(/* select#2 */ select 1 from `test`.`t2` semi join (`test`.`t3`) join `test`.`t3` where `test`.`t3`.`i3` = `test`.`t2`.`i2` and `test`.`t1`.`f1` = `test`.`t3`.`f3` limit 1))
@@ -2314,9 +2319,8 @@ SELECT pk, f1, ( SELECT COUNT(*) FROM t2
WHERE t1.pk IN ( SELECT f2 FROM t2 ) ) AS sq FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00
-2 DEPENDENT SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; FirstMatch
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using join buffer (flat, BNL join)
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00
Warnings:
Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`f1` AS `f1`,<expr_cache><`test`.`t1`.`pk`>((/* select#2 */ select count(0) from `test`.`t2` semi join (`test`.`t2`) where `test`.`t1`.`pk` = `test`.`t2`.`f2`)) AS `sq` from `test`.`t1`
@@ -2399,11 +2403,10 @@ WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
t2.user = '86826bf03710200044e0bfc8bcbe5d79');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t2.ugroup 2 Using where
+1 PRIMARY t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where; Start temporary
+1 PRIMARY t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
+1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t3_i.sys_id 2 Using index condition; Using where; End temporary
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
-2 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where
-2 MATERIALIZED t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
set statement optimizer_prune_level=1 for explain SELECT t1.assignment_group
FROM t1, t3
WHERE t1.assignment_group = t3.sys_id AND
@@ -2414,11 +2417,10 @@ WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
t2.user = '86826bf03710200044e0bfc8bcbe5d79');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t2.ugroup 2 Using where
+1 PRIMARY t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where; Start temporary
+1 PRIMARY t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
+1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t3_i.sys_id 2 Using index condition; Using where; End temporary
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
-3 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where
-3 MATERIALIZED t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
SELECT t1.assignment_group
FROM t1, t3
WHERE t1.assignment_group = t3.sys_id AND
@@ -2450,8 +2452,7 @@ explain
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 9
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t2 hash_ALL NULL #hash#$hj 8 test.t1.id,test.t1.id 3 Using where; FirstMatch(t1); Using join buffer (flat, BNLH join)
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
1
1
@@ -2463,8 +2464,7 @@ explain
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index id id 4 NULL 9 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
1
1
@@ -2513,20 +2513,17 @@ drop procedure prepare_data;
set @@optimizer_switch= @local_optimizer_switch;
drop table t1,t2,t3;
CREATE TABLE t1 ( id int NOT NULL, key(id));
-INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19);
+INSERT INTO t1 select seq from seq_11_to_39;
CREATE TABLE t2 (i1 int NOT NULL, i2 int NOT NULL);
-INSERT INTO t2 VALUES (11,11),(12,12),(13,13);
+INSERT INTO t2 select seq,seq+1 from seq_11_to_50;
CREATE VIEW v1 AS SELECT t2.i1 FROM t2 where t2.i1 = t2.i2;
explain SELECT 1 FROM t1 where t1.id IN (SELECT v1.i1 from v1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index id id 4 NULL 9 Using index
+1 PRIMARY t1 index id id 4 NULL 29 Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 40 Using where
SELECT 1 FROM t1 where t1.id IN (SELECT v1.i1 from v1);
1
-1
-1
-1
drop table t1,t2;
drop view v1;
#
@@ -2817,12 +2814,12 @@ PRIMARY KEY (pk)
INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo'),(2,'f','ffff','ffff','ffff');
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii'),(2,'f','ffff','ffff','ffff');
+insert into t2 select -seq,"","","","" from seq_1_to_10;
SET @@optimizer_switch='default,semijoin=on,materialization=on';
EXPLAIN SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using where
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
pk
2
diff --git a/mysql-test/main/subselect_mat.test b/mysql-test/main/subselect_mat.test
index cacafb0000f..58548d18caa 100644
--- a/mysql-test/main/subselect_mat.test
+++ b/mysql-test/main/subselect_mat.test
@@ -2,7 +2,7 @@
# Hash semi-join regression tests
# (WL#1110: Subquery optimization: materialization)
#
-
+--source include/have_sequence.inc
# force the use of materialization
set @subselect_mat_test_optimizer_switch_value='materialization=on,in_to_exists=off,semijoin=off';
@@ -111,6 +111,7 @@ INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo'),(2,'f','ffff','ffff','ffff');
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii'),(2,'f','ffff','ffff','ffff');
+insert into t2 select -seq,"","","","" from seq_1_to_10;
SET @@optimizer_switch='default,semijoin=on,materialization=on';
EXPLAIN SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
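The extra rows inserted into t2 in the hunk above shift the cost balance for the IN subquery, which is why the .result file now shows FirstMatch instead of a materialized <subquery2> in some of these plans. The two strategies can also be compared explicitly for the same statement (a sketch, assuming the t1/t2 defined above and the standard materialization/firstmatch optimizer_switch flags):

set @save_os=@@optimizer_switch;
set optimizer_switch='materialization=on,firstmatch=off';
explain select pk from t1 where (a) in (select a from t2 where pk > 0);
set optimizer_switch='materialization=off,firstmatch=on';
explain select pk from t1 where (a) in (select a from t2 where pk > 0);
set optimizer_switch=@save_os;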
diff --git a/mysql-test/main/subselect_mat_cost-master.opt b/mysql-test/main/subselect_mat_cost.opt
index cb4a9db9617..cb4a9db9617 100644
--- a/mysql-test/main/subselect_mat_cost-master.opt
+++ b/mysql-test/main/subselect_mat_cost.opt
diff --git a/mysql-test/main/subselect_mat_cost.result b/mysql-test/main/subselect_mat_cost.result
index 66d48b549c4..240332f3c5b 100644
--- a/mysql-test/main/subselect_mat_cost.result
+++ b/mysql-test/main/subselect_mat_cost.result
@@ -39,8 +39,6 @@ create index Language on CountryLanguage(Language);
create index CityName on City(Name);
alter table City change population population int(11) null default 0;
select max(id) from City into @max_city_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into City values (@max_city_id + 1,'Kilifarevo','BGR',NULL);
SELECT COUNT(*) FROM Country;
COUNT(*)
@@ -60,47 +58,19 @@ Q1.1m:
MATERIALIZATION: there are too many rows in the outer query
to be looked up in the inner table.
EXPLAIN
-SELECT Name FROM Country
+SELECT count(*) FROM Country
WHERE (Code IN (select Country from City where City.Population > 100000) OR
Name LIKE 'L%') AND
-surfacearea > 1000000;
+surfacearea > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL Name,SurfaceArea NULL NULL NULL 239 Using where
2 MATERIALIZED City ALL Population,Country NULL NULL NULL 4079 Using where
-SELECT Name FROM Country
+SELECT count(*) FROM Country
WHERE (Code IN (select Country from City where City.Population > 100000) OR
Name LIKE 'L%') AND
-surfacearea > 1000000;
-Name
-Algeria
-Angola
-Argentina
-Australia
-Bolivia
-Brazil
-Egypt
-South Africa
-Ethiopia
-Indonesia
-India
-Iran
-Canada
-Kazakstan
-China
-Colombia
-Congo, The Democratic Republic of the
-Libyan Arab Jamahiriya
-Mali
-Mauritania
-Mexico
-Mongolia
-Niger
-Peru
-Saudi Arabia
-Sudan
-Chad
-Russian Federation
-United States
+surfacearea > 100000;
+count(*)
+107
Q1.1e:
IN-EXISTS: the materialization cost is the same as above, but
there are much fewer outer rows to be looked up, thus the
@@ -136,10 +106,22 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL PRIMARY,SurfaceArea NULL NULL NULL 239 Using where
1 PRIMARY City ref Country Country 3 world.Country.Code 17 Using where
2 MATERIALIZED CountryLanguage ALL Percentage,Language NULL NULL NULL 984 Using where
+EXPLAIN
SELECT *
FROM Country, City
WHERE City.Country = Country.Code AND
-Country.SurfaceArea < 3000 AND Country.SurfaceArea > 10 AND
+Country.SurfaceArea < 500 AND Country.SurfaceArea > 10 AND
+(City.Name IN
+(select Language from CountryLanguage where Percentage > 50) OR
+City.name LIKE '%Island%');
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY Country range PRIMARY,SurfaceArea SurfaceArea 4 NULL 32 Using index condition; Rowid-ordered scan
+1 PRIMARY City ref Country Country 3 world.Country.Code 17 Using where
+2 MATERIALIZED CountryLanguage ALL Percentage,Language NULL NULL NULL 984 Using where
+SELECT *
+FROM Country, City
+WHERE City.Country = Country.Code AND
+Country.SurfaceArea < 500 AND Country.SurfaceArea > 10 AND
(City.Name IN
(select Language from CountryLanguage where Percentage > 50) OR
City.name LIKE '%Island%');
@@ -515,9 +497,30 @@ FROM City JOIN Country ON City.Country = Country.Code
GROUP BY City.Name
HAVING City.Name IN (select Name from Country where population < 1000000);
id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY City ALL Country NULL NULL NULL 4079 Using temporary; Using filesort
+1 PRIMARY Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using index
+2 MATERIALIZED Country ALL Name NULL NULL NULL 239 Using where
+Last_query_cost 5.934845
+EXPLAIN
+SELECT straight_join City.Name, City.Population
+FROM Country JOIN City ON City.Country = Country.Code
+GROUP BY City.Name
+HAVING City.Name IN (select Name from Country where population < 1000000);
+id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country index PRIMARY PRIMARY 3 NULL 239 Using index; Using temporary; Using filesort
1 PRIMARY City ref Country Country 3 world.Country.Code 17
2 MATERIALIZED Country ALL Name NULL NULL NULL 239 Using where
+Last_query_cost 7.972882
+EXPLAIN
+SELECT City.Name, City.Population
+FROM Country LEFT JOIN City ON City.Country = Country.Code
+GROUP BY City.Name
+HAVING City.Name IN (select Name from Country where population < 1000000);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY Country index NULL PRIMARY 3 NULL 239 Using index; Using temporary; Using filesort
+1 PRIMARY City ref Country Country 3 world.Country.Code 17
+2 MATERIALIZED Country ALL Name NULL NULL NULL 239 Using where
+Last_query_cost 7.972882
SELECT City.Name, City.Population
FROM City JOIN Country ON City.Country = Country.Code
GROUP BY City.Name
diff --git a/mysql-test/main/subselect_mat_cost.test b/mysql-test/main/subselect_mat_cost.test
index 8fe38849735..60763076c45 100644
--- a/mysql-test/main/subselect_mat_cost.test
+++ b/mysql-test/main/subselect_mat_cost.test
@@ -74,15 +74,15 @@ set @@optimizer_switch = 'in_to_exists=on,semijoin=on,materialization=on,partial
-- echo MATERIALIZATION: there are too many rows in the outer query
-- echo to be looked up in the inner table.
EXPLAIN
-SELECT Name FROM Country
+SELECT count(*) FROM Country
WHERE (Code IN (select Country from City where City.Population > 100000) OR
Name LIKE 'L%') AND
- surfacearea > 1000000;
+ surfacearea > 100000;
-SELECT Name FROM Country
+SELECT count(*) FROM Country
WHERE (Code IN (select Country from City where City.Population > 100000) OR
Name LIKE 'L%') AND
- surfacearea > 1000000;
+ surfacearea > 100000;
-- echo Q1.1e:
-- echo IN-EXISTS: the materialization cost is the same as above, but
@@ -113,10 +113,19 @@ SELECT *
(select Language from CountryLanguage where Percentage > 50) OR
City.name LIKE '%Island%');
+EXPLAIN
SELECT *
FROM Country, City
WHERE City.Country = Country.Code AND
- Country.SurfaceArea < 3000 AND Country.SurfaceArea > 10 AND
+ Country.SurfaceArea < 500 AND Country.SurfaceArea > 10 AND
+ (City.Name IN
+ (select Language from CountryLanguage where Percentage > 50) OR
+ City.name LIKE '%Island%');
+
+SELECT *
+ FROM Country, City
+ WHERE City.Country = Country.Code AND
+ Country.SurfaceArea < 500 AND Country.SurfaceArea > 10 AND
(City.Name IN
(select Language from CountryLanguage where Percentage > 50) OR
City.name LIKE '%Island%');
@@ -210,7 +219,6 @@ WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English
-- echo MATERIALIZATION because the outer query filters less rows than Q5-a,
-- echo so there are more lookups.
-
set statement optimizer_switch='rowid_filter=off' for
EXPLAIN
SELECT Country.Name
@@ -369,17 +377,32 @@ drop index CountryCapital on Country;
# TODO: the cost estimates for subqueries in the HAVING clause need to be changed
# to take into account that the subquery predicate is executed a number of times
# proportional to the number of groups, not the number of rows
+
EXPLAIN
SELECT City.Name, City.Population
FROM City JOIN Country ON City.Country = Country.Code
GROUP BY City.Name
HAVING City.Name IN (select Name from Country where population < 1000000);
+--source include/last_query_cost.inc
+
+EXPLAIN
+SELECT straight_join City.Name, City.Population
+FROM Country JOIN City ON City.Country = Country.Code
+GROUP BY City.Name
+HAVING City.Name IN (select Name from Country where population < 1000000);
+--source include/last_query_cost.inc
+EXPLAIN
SELECT City.Name, City.Population
-FROM City JOIN Country ON City.Country = Country.Code
+FROM Country LEFT JOIN City ON City.Country = Country.Code
GROUP BY City.Name
HAVING City.Name IN (select Name from Country where population < 1000000);
+--source include/last_query_cost.inc
+SELECT City.Name, City.Population
+FROM City JOIN Country ON City.Country = Country.Code
+GROUP BY City.Name
+HAVING City.Name IN (select Name from Country where population < 1000000);
-- echo
-- echo 5. Subqueries with UNION
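The --source include/last_query_cost.inc lines added above are what produce the Last_query_cost rows in the .result hunk (e.g. Last_query_cost 5.934845), making it possible to compare the total plan cost of the three join orders. Outside the test framework the same figure can presumably be read from the session status right after the EXPLAIN, for example for the first variant:

EXPLAIN
SELECT City.Name, City.Population
FROM City JOIN Country ON City.Country = Country.Code
GROUP BY City.Name
HAVING City.Name IN (select Name from Country where population < 1000000);
SHOW SESSION STATUS LIKE 'Last_query_cost';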
diff --git a/mysql-test/main/subselect_mat_cost_bugs.result b/mysql-test/main/subselect_mat_cost_bugs.result
index 1889291398c..77b3430ba1f 100644
--- a/mysql-test/main/subselect_mat_cost_bugs.result
+++ b/mysql-test/main/subselect_mat_cost_bugs.result
@@ -95,9 +95,9 @@ t1a ON (t1a.c2 = t1b.pk AND 2)
WHERE t1.pk) ;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 9 100.00 Using where
-2 DEPENDENT SUBQUERY t1b ALL NULL NULL NULL NULL 9 100.00
+2 DEPENDENT SUBQUERY t1b ALL NULL NULL NULL NULL 9 100.00 Using where
2 DEPENDENT SUBQUERY t1a ref c2 c2 5 test.t1b.pk 1 100.00 Using where
-2 DEPENDENT SUBQUERY t2 index c3 c3 9 NULL 2 100.00 Using where; Using index; Using join buffer (flat, BNL join)
+2 DEPENDENT SUBQUERY t2 ref c3 c3 4 test.t1b.c4 1 100.00 Using index
Warnings:
Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` where <expr_cache><`test`.`t1`.`c1`,`test`.`t1`.`pk`>(<in_optimizer>(`test`.`t1`.`c1`,<exists>(/* select#2 */ select `test`.`t1a`.`c1` from `test`.`t1b` join `test`.`t2` left join `test`.`t1a` on(`test`.`t1a`.`c2` = `test`.`t1b`.`pk` and 2) where `test`.`t2`.`c3` = `test`.`t1b`.`c4` and `test`.`t1`.`pk` <> 0 and <cache>(`test`.`t1`.`c1`) = `test`.`t1a`.`c1`)))
@@ -263,8 +263,8 @@ WHERE alias1.f11 OR alias1.f3 = 50 AND alias1.f10
);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED alias1 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED alias2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 DEPENDENT SUBQUERY alias1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY alias2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
SELECT * FROM t2
WHERE ( f12 ) IN (
SELECT alias2.f3
@@ -275,6 +275,8 @@ f12 f13
Warnings:
Warning 1292 Truncated incorrect DECIMAL value: 'f'
Warning 1292 Truncated incorrect DECIMAL value: 'd'
+Warning 1292 Truncated incorrect DECIMAL value: 'f'
+Warning 1292 Truncated incorrect DECIMAL value: 'd'
EXPLAIN
SELECT * FROM t2
WHERE ( f12 ) IN (
@@ -283,8 +285,8 @@ FROM t1 AS alias1, t1 AS alias2
WHERE (alias2.f10 = alias1.f11) AND (alias1.f11 OR alias1.f3 = 50 AND alias1.f10));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED alias1 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED alias2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 DEPENDENT SUBQUERY alias1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY alias2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
SELECT * FROM t2
WHERE ( f12 ) IN (
SELECT alias2.f3
@@ -294,6 +296,8 @@ f12 f13
Warnings:
Warning 1292 Truncated incorrect DECIMAL value: 'f'
Warning 1292 Truncated incorrect DECIMAL value: 'd'
+Warning 1292 Truncated incorrect DECIMAL value: 'f'
+Warning 1292 Truncated incorrect DECIMAL value: 'd'
set @@optimizer_switch=@save_optimizer_switch;
drop table t1, t2;
#
@@ -316,7 +320,7 @@ explain
select c1 from t1 where c1 in (select kp1 from t2 where kp2 = 10 and c2 = 4) or c1 > 7;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-2 DEPENDENT SUBQUERY t2 ref key1,key2,key3 key3 5 const 1 Using where
+2 DEPENDENT SUBQUERY t2 index_subquery key1,key2,key3 key1 10 func,const 1 Using where
select c1 from t1 where c1 in (select kp1 from t2 where kp2 = 10 and c2 = 4) or c1 > 7;
c1
set @@optimizer_switch='default';
diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result
index e32e6007328..3a144e70c75 100644
--- a/mysql-test/main/subselect_no_exists_to_in.result
+++ b/mysql-test/main/subselect_no_exists_to_in.result
@@ -899,6 +899,9 @@ select (select a+1) from t1;
NULL
4.5
drop table t1;
+#
+# Null with keys
+#
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
INSERT INTO t1 VALUES (1),(2),(3),(4);
@@ -1430,6 +1433,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
a
1
+#
+# IN subselect optimization test
+#
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -1453,21 +1459,21 @@ a
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
a
2
3
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
-1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t2`.`a` = `test`.`t1`.`a`
drop table t1, t2, t3;
create table t1 (a int, b int, index a (a,b));
create table t2 (a int, index a (a));
@@ -1476,42 +1482,48 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
-a
-2
-3
-4
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a`
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
a
2
+3
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
-3
+4
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
1 PRIMARY t3 range a a 5 NULL 3 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 0.29 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1` join `test`.`t3`) where `test`.`t1`.`b` = `test`.`t3`.`a` and `test`.`t1`.`a` = `test`.`t2`.`a`
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+a
+2
+3
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
+Warnings:
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1521,12 +1533,6 @@ select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31
a
2
4
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
-Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
drop table t0, t1, t2, t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
@@ -1588,6 +1594,9 @@ Note 1003 (select 'tttt' AS `s1` from dual)
s1
tttt
drop table t1;
+#
+# IN optimisation test results
+#
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
@@ -2421,19 +2430,22 @@ a
1
3
DROP TABLE t1;
+#
+# SELECT (EXISTS * ...) optimisation
+#
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-a b
-1 2
-3 4
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+sum(a+b)
+1296
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY up ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY up ALL NULL NULL NULL NULL 25 100.00 Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 25 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.up.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`up`.`a` AS `a`,`test`.`up`.`b` AS `b` from `test`.`t1` `up` where <expr_cache><`test`.`up`.`a`>(exists(/* select#2 */ select 1 from `test`.`t1` where `test`.`t1`.`a` = `test`.`up`.`a` limit 1))
+Note 1003 /* select#1 */ select sum(`test`.`up`.`a` + `test`.`up`.`b`) AS `sum(a+b)` from `test`.`t1` `up` where <expr_cache><`test`.`up`.`a`>(exists(/* select#2 */ select 1 from `test`.`t1` where `test`.`t1`.`a` = `test`.`up`.`a` limit 1))
drop table t1;
CREATE TABLE t1 (t1_a int);
INSERT INTO t1 VALUES (1);
@@ -3101,9 +3113,13 @@ retailerID statusID changed
0048 1 2006-01-06 12:37:50
0059 1 2006-01-06 12:37:50
drop table t1;
+#
+# Bug#21180 Subselect with index for both WHERE and ORDER BY
+# produces empty result
+#
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3116,7 +3132,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using where
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3128,7 +3144,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -4228,8 +4244,8 @@ INSERT INTO t2 VALUES (7), (5), (1), (3);
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
id st
-3 FL
1 GA
+3 FL
7 FL
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id)
@@ -4330,6 +4346,9 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1;
0
0
DROP TABLE t1, t2;
+#
+# Bug#28076 inconsistent binary/varbinary comparison
+#
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
SELECT s1, s2 FROM t1 WHERE s2 IN (SELECT s1 FROM t1);
@@ -4391,8 +4410,8 @@ CREATE INDEX I1 ON t1 (a);
CREATE INDEX I2 ON t1 (b);
EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
a b
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10));
@@ -4401,15 +4420,15 @@ CREATE INDEX I1 ON t2 (a);
CREATE INDEX I2 ON t2 (b);
EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t2 ref I1 I1 4 test.t2.b 2 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t2 index I1 I1 4 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t2 ref I2 I2 13 test.t2.a 1 Using index condition
SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
a b
EXPLAIN
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
a b
DROP TABLE t1,t2;
@@ -4439,10 +4458,13 @@ out_a MIN(b)
1 2
2 4
DROP TABLE t1;
+#
+# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
2
2
@@ -4450,8 +4472,8 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select 2 AS `2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a`>(exists(/* select#2 */ select 1 from `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` limit 1))
@@ -4459,9 +4481,9 @@ EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION
(SELECT 1 FROM t2 WHERE t1.a = t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 3 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
@@ -5700,7 +5722,8 @@ DROP TABLE IF EXISTS ot1, ot4, it2, it3;
CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
a
@@ -5710,7 +5733,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX() WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 502 Using where
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
a
@@ -5720,7 +5743,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 DEPENDENT SUBQUERY t2 ref idx idx 5 test.t1.a 2 Using index
+2 DEPENDENT SUBQUERY t2 ref idx idx 5 test.t1.a 58 Using index
DROP TABLE t1,t2;
#
# BUG#752992: Wrong results for a subquery with 'semijoin=on'
@@ -5737,9 +5760,9 @@ SET @save_join_cache_level=@@join_cache_level;
SET join_cache_level=0;
EXPLAIN SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 3
-1 PRIMARY it eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 Using index
-1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(it)
+1 PRIMARY it index PRIMARY PRIMARY 4 NULL 3 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.it.pk 1
+1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
pk i
11 0
@@ -6080,8 +6103,7 @@ WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED it1 ref idx_cvk_cik idx_cvk_cik 9 const,const 1 Using where; Using index
+1 PRIMARY it1 ref idx_cvk_cik idx_cvk_cik 9 const,const 1 Using where; Using index; FirstMatch(ot)
SELECT col_int_nokey FROM ot
WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
@@ -6093,8 +6115,7 @@ WHERE (col_varchar_nokey, 'x') IN
(SELECT col_varchar_key, col_varchar_key2 FROM it2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-2 MATERIALIZED it2 ref idx_cvk_cvk2_cik,idx_cvk_cik idx_cvk_cvk2_cik 8 const,const 1 Using where; Using index
+1 PRIMARY it2 ref idx_cvk_cvk2_cik,idx_cvk_cik idx_cvk_cvk2_cik 8 const,const 1 Using where; Using index; FirstMatch(ot)
SELECT col_int_nokey FROM ot
WHERE (col_varchar_nokey, 'x') IN
(SELECT col_varchar_key, col_varchar_key2 FROM it2);
@@ -6638,7 +6659,7 @@ SET @@optimizer_switch='semijoin=off,materialization=off,in_to_exists=on,subquer
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 4 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 1 Using index
SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
a
2009-01-01
@@ -6841,7 +6862,7 @@ FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
WHERE alias1.a = alias2.a OR ('Moscow') IN ( SELECT a FROM t1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index a a 19 NULL 11 Using where; Using index
-1 PRIMARY alias2 ref a a 19 test.alias1.a 2 Using index
+1 PRIMARY alias2 ref a a 19 test.alias1.a 1 Using index
1 PRIMARY alias3 index NULL a 19 NULL 11 Using index; Using join buffer (flat, BNL join)
2 SUBQUERY t1 index_subquery a a 19 const 1 Using index; Using where
SELECT MAX( alias2.a )
@@ -6992,7 +7013,7 @@ WHERE SLEEP(0.1) OR c < 'p' OR b = ( SELECT MIN(b) FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL b NULL NULL NULL 2 Using where
-1 PRIMARY t3 ref d d 5 test.t2.b 2 Using index
+1 PRIMARY t3 ref d d 5 test.t2.b 1 Using index
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
set @tmp_mdev410=@@global.userstat;
set global userstat=on;
@@ -7024,7 +7045,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields
@@ -7058,7 +7079,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-5991: crash in Item_field::used_tables
diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result
index 07755a5144a..ca68e4d7e03 100644
--- a/mysql-test/main/subselect_no_mat.result
+++ b/mysql-test/main/subselect_no_mat.result
@@ -352,7 +352,7 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
-1 PRIMARY t6 ALL i1 NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t6 ref i1 i1 5 test.t7.uq 1 100.00
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t7` join `test`.`t6` where `test`.`t6`.`clinic_uq` = `test`.`t7`.`uq`
@@ -902,6 +902,9 @@ select (select a+1) from t1;
NULL
4.5
drop table t1;
+#
+# Null with keys
+#
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
INSERT INTO t1 VALUES (1),(2),(3),(4);
@@ -1433,6 +1436,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
a
1
+#
+# IN subselect optimization test
+#
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -1456,21 +1462,21 @@ a
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
a
2
3
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
-1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t2`.`a` = `test`.`t1`.`a`
drop table t1, t2, t3;
create table t1 (a int, b int, index a (a,b));
create table t2 (a int, index a (a));
@@ -1479,42 +1485,48 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
-a
-2
-3
-4
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a`
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
a
2
+3
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
-3
+4
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
1 PRIMARY t3 range a a 5 NULL 3 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 0.29 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1` join `test`.`t3`) where `test`.`t1`.`b` = `test`.`t3`.`a` and `test`.`t1`.`a` = `test`.`t2`.`a`
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+a
+2
+3
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
+Warnings:
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1524,12 +1536,6 @@ select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31
a
2
4
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
-Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
drop table t0, t1, t2, t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
@@ -1591,6 +1597,9 @@ Note 1003 (select 'tttt' AS `s1` from dual)
s1
tttt
drop table t1;
+#
+# IN optimisation test results
+#
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
@@ -2424,19 +2433,22 @@ a
1
3
DROP TABLE t1;
+#
+# SELECT (EXISTS * ...) optimisation
+#
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-a b
-1 2
-3 4
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+sum(a+b)
+1296
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY up ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(up); Using join buffer (flat, BNL join)
+1 PRIMARY up ALL NULL NULL NULL NULL 25 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 25 4.00 Using where; FirstMatch(up); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.up.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select `test`.`up`.`a` AS `a`,`test`.`up`.`b` AS `b` from `test`.`t1` `up` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`up`.`a`
+Note 1003 select sum(`test`.`up`.`a` + `test`.`up`.`b`) AS `sum(a+b)` from `test`.`t1` `up` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`up`.`a`
drop table t1;
CREATE TABLE t1 (t1_a int);
INSERT INTO t1 VALUES (1);
@@ -2987,7 +2999,7 @@ Note 1003 /* select#1 */ select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS
explain extended SELECT one,two from t1 where ROW(one,two) IN (SELECT one,two FROM t2 WHERE flag = 'N');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 9 100.00 Using where; FirstMatch(t1)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 9 11.11 Using where; Start temporary; End temporary
Warnings:
Note 1003 select `test`.`t1`.`one` AS `one`,`test`.`t1`.`two` AS `two` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`flag` = 'N' and `test`.`t2`.`one` = `test`.`t1`.`one` and `test`.`t2`.`two` = `test`.`t1`.`two`
explain extended SELECT one,two,ROW(one,two) IN (SELECT one,two FROM t2 WHERE flag = '0' group by one,two) as 'test' from t1;
@@ -3103,9 +3115,13 @@ retailerID statusID changed
0048 1 2006-01-06 12:37:50
0059 1 2006-01-06 12:37:50
drop table t1;
+#
+# Bug#21180 Subselect with index for both WHERE and ORDER BY
+# produces empty result
+#
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3118,7 +3134,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using where
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3130,7 +3146,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -4228,8 +4244,8 @@ INSERT INTO t2 VALUES (7), (5), (1), (3);
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
id st
-3 FL
1 GA
+3 FL
7 FL
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id)
@@ -4330,6 +4346,9 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1;
0
0
DROP TABLE t1, t2;
+#
+# Bug#28076 inconsistent binary/varbinary comparison
+#
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
SELECT s1, s2 FROM t1 WHERE s2 IN (SELECT s1 FROM t1);
@@ -4391,8 +4410,8 @@ CREATE INDEX I1 ON t1 (a);
CREATE INDEX I2 ON t1 (b);
EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
a b
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10));
@@ -4401,15 +4420,15 @@ CREATE INDEX I1 ON t2 (a);
CREATE INDEX I2 ON t2 (b);
EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t2 ref I1 I1 4 test.t2.b 2 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t2 index I1 I1 4 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t2 ref I2 I2 13 test.t2.a 1 Using index condition
SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
a b
EXPLAIN
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
a b
DROP TABLE t1,t2;
@@ -4439,10 +4458,13 @@ out_a MIN(b)
1 2
2 4
DROP TABLE t1;
+#
+# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
2
2
@@ -4450,8 +4472,8 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 33.33 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 select 2 AS `2` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = `test`.`t1`.`a`
@@ -4459,9 +4481,9 @@ EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION
(SELECT 1 FROM t2 WHERE t1.a = t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 3 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
@@ -5697,7 +5719,8 @@ DROP TABLE IF EXISTS ot1, ot4, it2, it3;
CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
a
@@ -5707,7 +5730,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX() WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 502 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
a
@@ -5717,7 +5740,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 2 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref idx idx 5 test.t1.a 11 Using index; FirstMatch(t1)
DROP TABLE t1,t2;
#
# BUG#752992: Wrong results for a subquery with 'semijoin=on'
@@ -5734,9 +5757,9 @@ SET @save_join_cache_level=@@join_cache_level;
SET join_cache_level=0;
EXPLAIN SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 3
-1 PRIMARY it eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 Using index
-1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(it)
+1 PRIMARY it index PRIMARY PRIMARY 4 NULL 3 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.it.pk 1
+1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
pk i
11 0
@@ -6633,7 +6656,7 @@ SET @@optimizer_switch='semijoin=off,materialization=off,in_to_exists=on,subquer
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 4 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 1 Using index
SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
a
2009-01-01
@@ -6836,7 +6859,7 @@ FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
WHERE alias1.a = alias2.a OR ('Moscow') IN ( SELECT a FROM t1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index a a 19 NULL 11 Using where; Using index
-1 PRIMARY alias2 ref a a 19 test.alias1.a 2 Using index
+1 PRIMARY alias2 ref a a 19 test.alias1.a 1 Using index
1 PRIMARY alias3 index NULL a 19 NULL 11 Using index; Using join buffer (flat, BNL join)
2 SUBQUERY t1 index_subquery a a 19 const 1 Using index; Using where
SELECT MAX( alias2.a )
@@ -6986,7 +7009,7 @@ WHERE SLEEP(0.1) OR c < 'p' OR b = ( SELECT MIN(b) FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL b NULL NULL NULL 2 Using where
-1 PRIMARY t3 ref d d 5 test.t2.b 2 Using index
+1 PRIMARY t3 ref d d 5 test.t2.b 1 Using index
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
set @tmp_mdev410=@@global.userstat;
set global userstat=on;
@@ -7018,7 +7041,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields
@@ -7051,7 +7074,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-5991: crash in Item_field::used_tables
diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result
index 15688fc1717..f9561178ca5 100644
--- a/mysql-test/main/subselect_no_opts.result
+++ b/mysql-test/main/subselect_no_opts.result
@@ -898,6 +898,9 @@ select (select a+1) from t1;
NULL
4.5
drop table t1;
+#
+# Null with keys
+#
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
INSERT INTO t1 VALUES (1),(2),(3),(4);
@@ -1429,6 +1432,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
a
1
+#
+# IN subselect optimization test
+#
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -1475,23 +1481,19 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
-a
-2
-3
-4
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a)))
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
a
2
+3
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
@@ -1499,18 +1501,28 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index; Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`)))
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
-3
+4
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t3 index a a 5 NULL 3 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t1 ref a a 10 func,test.t3.a 1167 100.00 Using index
+2 DEPENDENT SUBQUERY t1 ref a a 5 func 1001 100.00 Using index
+2 DEPENDENT SUBQUERY t3 index a a 5 NULL 3 33.33 Using where; Using index; Using join buffer (flat, BNL join)
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` join `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`a` and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` join `test`.`t3` where `test`.`t3`.`a` = `test`.`t1`.`b` and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`))
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+a
+2
+3
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index; Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`)))
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1520,12 +1532,6 @@ select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31
a
2
4
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index; Using where
-Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`)))
drop table t0, t1, t2, t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
@@ -1587,6 +1593,9 @@ Note 1003 (select 'tttt' AS `s1` from dual)
s1
tttt
drop table t1;
+#
+# IN optimisation test results
+#
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
@@ -2420,19 +2429,22 @@ a
1
3
DROP TABLE t1;
+#
+# SELECT (EXISTS * ...) optimisation
+#
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-a b
-1 2
-3 4
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+sum(a+b)
+1296
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY up ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY up ALL NULL NULL NULL NULL 25 100.00 Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 25 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.up.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`up`.`a` AS `a`,`test`.`up`.`b` AS `b` from `test`.`t1` `up` where <in_optimizer>(`test`.`up`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where <cache>(`test`.`up`.`a`) = `test`.`t1`.`a`))
+Note 1003 /* select#1 */ select sum(`test`.`up`.`a` + `test`.`up`.`b`) AS `sum(a+b)` from `test`.`t1` `up` where <in_optimizer>(`test`.`up`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where <cache>(`test`.`up`.`a`) = `test`.`t1`.`a`))
drop table t1;
CREATE TABLE t1 (t1_a int);
INSERT INTO t1 VALUES (1);
@@ -3099,9 +3111,13 @@ retailerID statusID changed
0048 1 2006-01-06 12:37:50
0059 1 2006-01-06 12:37:50
drop table t1;
+#
+# Bug#21180 Subselect with index for both WHERE and ORDER BY
+# produces empty result
+#
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3114,7 +3130,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using where
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3126,7 +3142,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -4224,8 +4240,8 @@ INSERT INTO t2 VALUES (7), (5), (1), (3);
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
id st
-3 FL
1 GA
+3 FL
7 FL
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id)
@@ -4326,6 +4342,9 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1;
0
0
DROP TABLE t1, t2;
+#
+# Bug#28076 inconsistent binary/varbinary comparison
+#
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
SELECT s1, s2 FROM t1 WHERE s2 IN (SELECT s1 FROM t1);
@@ -4388,7 +4407,7 @@ CREATE INDEX I2 ON t1 (b);
EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 1 Using index; Using where
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
a b
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10));
@@ -4398,14 +4417,14 @@ CREATE INDEX I2 ON t2 (b);
EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t2 index_subquery I1 I1 4 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t2 index_subquery I1 I1 4 func 1 Using index; Using where
SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
a b
EXPLAIN
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 1 Using index; Using where
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
a b
DROP TABLE t1,t2;
@@ -4435,10 +4454,13 @@ out_a MIN(b)
1 2
2 4
DROP TABLE t1;
+#
+# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
2
2
@@ -4446,8 +4468,8 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select 2 AS `2` from `test`.`t1` where <in_optimizer>(`test`.`t1`.`a`,<exists>(/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` where <cache>(`test`.`t1`.`a`) = `test`.`t2`.`a`))
@@ -4455,9 +4477,9 @@ EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION
(SELECT 1 FROM t2 WHERE t1.a = t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 3 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
@@ -5693,7 +5715,8 @@ DROP TABLE IF EXISTS ot1, ot4, it2, it3;
CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
a
@@ -5703,7 +5726,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX() WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 502 Using where
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
a
@@ -5713,7 +5736,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 DEPENDENT SUBQUERY t2 index_subquery idx idx 5 func 2 Using index
+2 DEPENDENT SUBQUERY t2 index_subquery idx idx 5 func 58 Using index
DROP TABLE t1,t2;
#
# BUG#752992: Wrong results for a subquery with 'semijoin=on'
@@ -6073,7 +6096,7 @@ WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-2 DEPENDENT SUBQUERY it1 index_subquery idx_cvk_cik idx_cvk_cik 9 func,const 2 Using index; Using where
+2 DEPENDENT SUBQUERY it1 index_subquery idx_cvk_cik idx_cvk_cik 9 func,const 1 Using index; Using where
SELECT col_int_nokey FROM ot
WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
@@ -6629,7 +6652,7 @@ SET @@optimizer_switch='semijoin=off,materialization=off,in_to_exists=on,subquer
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 4 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 1 Using index
SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
a
2009-01-01
@@ -6832,7 +6855,7 @@ FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
WHERE alias1.a = alias2.a OR ('Moscow') IN ( SELECT a FROM t1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index a a 19 NULL 11 Using where; Using index
-1 PRIMARY alias2 ref a a 19 test.alias1.a 2 Using index
+1 PRIMARY alias2 ref a a 19 test.alias1.a 1 Using index
1 PRIMARY alias3 index NULL a 19 NULL 11 Using index; Using join buffer (flat, BNL join)
2 SUBQUERY t1 index_subquery a a 19 const 1 Using index; Using where
SELECT MAX( alias2.a )
@@ -6983,7 +7006,7 @@ WHERE SLEEP(0.1) OR c < 'p' OR b = ( SELECT MIN(b) FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL b NULL NULL NULL 2 Using where
-1 PRIMARY t3 ref d d 5 test.t2.b 2 Using index
+1 PRIMARY t3 ref d d 5 test.t2.b 1 Using index
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
set @tmp_mdev410=@@global.userstat;
set global userstat=on;
@@ -7015,7 +7038,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields
@@ -7049,7 +7072,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-5991: crash in Item_field::used_tables
diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result
index e3bdddbf84b..813ca78703a 100644
--- a/mysql-test/main/subselect_no_scache.result
+++ b/mysql-test/main/subselect_no_scache.result
@@ -351,7 +351,7 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
-1 PRIMARY t6 ALL i1 NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t6 ref i1 i1 5 test.t7.uq 1 100.00
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t7` join `test`.`t6` where `test`.`t6`.`clinic_uq` = `test`.`t7`.`uq`
@@ -901,6 +901,9 @@ select (select a+1) from t1;
NULL
4.5
drop table t1;
+#
+# Null with keys
+#
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
INSERT INTO t1 VALUES (1),(2),(3),(4);
@@ -1432,6 +1435,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
a
1
+#
+# IN subselect optimization test
+#
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -1455,21 +1461,21 @@ a
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
a
2
3
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
-1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00 Using index
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t2`.`a` = `test`.`t1`.`a`
drop table t1, t2, t3;
create table t1 (a int, b int, index a (a,b));
create table t2 (a int, index a (a));
@@ -1478,42 +1484,48 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
-a
-2
-3
-4
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a`
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
a
2
+3
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
-3
+4
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
1 PRIMARY t3 range a a 5 NULL 3 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 100.00 Using index; FirstMatch(t2)
+1 PRIMARY t1 ref a a 10 test.t2.a,test.t3.a 116 0.29 Using index; FirstMatch(t2)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1` join `test`.`t3`) where `test`.`t1`.`b` = `test`.`t3`.`a` and `test`.`t1`.`a` = `test`.`t2`.`a`
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+a
+2
+3
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
+1 PRIMARY t1 ref a a 5 test.t2.a 101 0.99 Using where; Using index; FirstMatch(t2)
+Warnings:
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1523,12 +1535,6 @@ select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31
a
2
4
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index a a 5 NULL 4 100.00 Using where; Using index
-1 PRIMARY t1 ref a a 5 test.t2.a 101 100.00 Using where; Using index; FirstMatch(t2)
-Warnings:
-Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
drop table t0, t1, t2, t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
@@ -1590,6 +1596,9 @@ Note 1003 (select 'tttt' AS `s1` from dual)
s1
tttt
drop table t1;
+#
+# IN optimisation test results
+#
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
@@ -2423,20 +2432,23 @@ a
1
3
DROP TABLE t1;
+#
+# SELECT (EXISTS * ...) optimisation
+#
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-a b
-1 2
-3 4
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+sum(a+b)
+1296
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY up ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY up ALL NULL NULL NULL NULL 25 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 2 100.00
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 25 100.00
Warnings:
Note 1276 Field or reference 'test.up.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select `test`.`up`.`a` AS `a`,`test`.`up`.`b` AS `b` from `test`.`t1` `up` semi join (`test`.`t1`) where 1
+Note 1003 select sum(`test`.`up`.`a` + `test`.`up`.`b`) AS `sum(a+b)` from `test`.`t1` `up` semi join (`test`.`t1`) where 1
drop table t1;
CREATE TABLE t1 (t1_a int);
INSERT INTO t1 VALUES (1);
@@ -3104,9 +3116,13 @@ retailerID statusID changed
0048 1 2006-01-06 12:37:50
0059 1 2006-01-06 12:37:50
drop table t1;
+#
+# Bug#21180 Subselect with index for both WHERE and ORDER BY
+# produces empty result
+#
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3119,7 +3135,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using where
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3131,7 +3147,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -4231,8 +4247,8 @@ INSERT INTO t2 VALUES (7), (5), (1), (3);
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
id st
-3 FL
1 GA
+3 FL
7 FL
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id)
@@ -4333,6 +4349,9 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1;
0
0
DROP TABLE t1, t2;
+#
+# Bug#28076 inconsistent binary/varbinary comparison
+#
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
SELECT s1, s2 FROM t1 WHERE s2 IN (SELECT s1 FROM t1);
@@ -4394,8 +4413,8 @@ CREATE INDEX I1 ON t1 (a);
CREATE INDEX I2 ON t1 (b);
EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
a b
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10));
@@ -4404,15 +4423,15 @@ CREATE INDEX I1 ON t2 (a);
CREATE INDEX I2 ON t2 (b);
EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t2 ref I1 I1 4 test.t2.b 2 Using where; Using index; FirstMatch(t2)
+1 PRIMARY t2 index I1 I1 4 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t2 ref I2 I2 13 test.t2.a 1 Using index condition
SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
a b
EXPLAIN
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL I2 NULL NULL NULL 2 Using where
-1 PRIMARY t1 ref I1 I1 2 test.t1.b 2 Using where; Using index; FirstMatch(t1)
+1 PRIMARY t1 index I1 I1 2 NULL 2 Using where; Using index; LooseScan
+1 PRIMARY t1 ref I2 I2 13 test.t1.a 1 Using index condition
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
a b
DROP TABLE t1,t2;
@@ -4442,10 +4461,13 @@ out_a MIN(b)
1 2
2 4
DROP TABLE t1;
+#
+# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
2
2
@@ -4453,19 +4475,18 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 33.33 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 select 2 AS `2` from `test`.`t1` semi join (`test`.`t2`) where 1
+Note 1003 select 2 AS `2` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = `test`.`t1`.`a`
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION
(SELECT 1 FROM t2 WHERE t1.a = t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 3 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
@@ -5704,7 +5725,8 @@ DROP TABLE IF EXISTS ot1, ot4, it2, it3;
CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
a
@@ -5715,7 +5737,7 @@ WHERE EXISTS (SELECT a FROM t2 USE INDEX() WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 502
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
a
@@ -5724,9 +5746,8 @@ EXPLAIN
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 index idx idx 5 NULL 3 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 11 Using index; FirstMatch(t1)
DROP TABLE t1,t2;
#
# BUG#752992: Wrong results for a subquery with 'semijoin=on'
@@ -5743,9 +5764,9 @@ SET @save_join_cache_level=@@join_cache_level;
SET join_cache_level=0;
EXPLAIN SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 3
-1 PRIMARY it eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 Using index
-1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(it)
+1 PRIMARY it index PRIMARY PRIMARY 4 NULL 3 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.it.pk 1
+1 PRIMARY t2 index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
pk i
11 0
@@ -6086,8 +6107,7 @@ WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED it1 ref idx_cvk_cik idx_cvk_cik 9 const,const 1 Using where; Using index
+1 PRIMARY it1 ref idx_cvk_cik idx_cvk_cik 9 const,const 1 Using where; Using index; FirstMatch(ot)
SELECT col_int_nokey FROM ot
WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
@@ -6099,8 +6119,7 @@ WHERE (col_varchar_nokey, 'x') IN
(SELECT col_varchar_key, col_varchar_key2 FROM it2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-2 MATERIALIZED it2 ref idx_cvk_cvk2_cik,idx_cvk_cik idx_cvk_cvk2_cik 8 const,const 1 Using where; Using index
+1 PRIMARY it2 ref idx_cvk_cvk2_cik,idx_cvk_cik idx_cvk_cvk2_cik 8 const,const 1 Using where; Using index; FirstMatch(ot)
SELECT col_int_nokey FROM ot
WHERE (col_varchar_nokey, 'x') IN
(SELECT col_varchar_key, col_varchar_key2 FROM it2);
@@ -6644,7 +6663,7 @@ SET @@optimizer_switch='semijoin=off,materialization=off,in_to_exists=on,subquer
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 4 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 1 Using index
SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
a
2009-01-01
@@ -6847,7 +6866,7 @@ FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
WHERE alias1.a = alias2.a OR ('Moscow') IN ( SELECT a FROM t1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index a a 19 NULL 11 Using where; Using index
-1 PRIMARY alias2 ref a a 19 test.alias1.a 2 Using index
+1 PRIMARY alias2 ref a a 19 test.alias1.a 1 Using index
1 PRIMARY alias3 index NULL a 19 NULL 11 Using index; Using join buffer (flat, BNL join)
2 SUBQUERY t1 index_subquery a a 19 const 1 Using index; Using where
SELECT MAX( alias2.a )
@@ -6998,7 +7017,7 @@ WHERE SLEEP(0.1) OR c < 'p' OR b = ( SELECT MIN(b) FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL b NULL NULL NULL 2 Using where
-1 PRIMARY t3 ref d d 5 test.t2.b 2 Using index
+1 PRIMARY t3 ref d d 5 test.t2.b 1 Using index
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
set @tmp_mdev410=@@global.userstat;
set global userstat=on;
@@ -7030,7 +7049,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields
@@ -7064,7 +7083,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-5991: crash in Item_field::used_tables
diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result
index 88f8f78019e..19dae33c5c1 100644
--- a/mysql-test/main/subselect_no_semijoin.result
+++ b/mysql-test/main/subselect_no_semijoin.result
@@ -348,10 +348,10 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t6 ALL NULL NULL NULL NULL 4 100.00 Using where
-2 MATERIALIZED t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
+2 DEPENDENT SUBQUERY t7 unique_subquery PRIMARY PRIMARY 4 func 1 100.00 Using index
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t6` where <expr_cache><`test`.`t6`.`clinic_uq`>(<in_optimizer>(`test`.`t6`.`clinic_uq`,`test`.`t6`.`clinic_uq` in ( <materialize> (/* select#2 */ select `test`.`t7`.`uq` from `test`.`t7` where 1 ), <primary_index_lookup>(`test`.`t6`.`clinic_uq` in <temporary table> on distinct_key where `test`.`t6`.`clinic_uq` = `<subquery2>`.`uq`))))
+Note 1003 /* select#1 */ select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t6` where <expr_cache><`test`.`t6`.`clinic_uq`>(<in_optimizer>(`test`.`t6`.`clinic_uq`,<exists>(<primary_index_lookup>(<cache>(`test`.`t6`.`clinic_uq`) in t7 on PRIMARY))))
select * from t1 where a= (select a from t2,t4 where t2.b=t4.b);
ERROR 23000: Column 'a' in field list is ambiguous
drop table t1,t2,t3;
@@ -898,6 +898,9 @@ select (select a+1) from t1;
NULL
4.5
drop table t1;
+#
+# Null with keys
+#
CREATE TABLE t1 (a int(11) NOT NULL default '0', PRIMARY KEY (a));
CREATE TABLE t2 (a int(11) default '0', INDEX (a));
INSERT INTO t1 VALUES (1),(2),(3),(4);
@@ -911,9 +914,9 @@ a t1.a in (select t2.a from t2)
explain extended SELECT t1.a, t1.a in (select t2.a from t2) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL PRIMARY 4 NULL 4 100.00 Using index
-2 MATERIALIZED t2 index a a 5 NULL 3 100.00 Using index
+2 SUBQUERY t2 index_subquery a a 5 func 2 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`a`)))) AS `t1.a in (select t2.a from t2)` from `test`.`t1`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`a`) in t2 on a checking NULL having `test`.`t2`.`a` is null)))) AS `t1.a in (select t2.a from t2)` from `test`.`t1`
CREATE TABLE t3 (a int(11) default '0');
INSERT INTO t3 VALUES (1),(2),(3);
SELECT t1.a, t1.a in (select t2.a from t2,t3 where t3.a=t2.a) FROM t1;
@@ -925,10 +928,10 @@ a t1.a in (select t2.a from t2,t3 where t3.a=t2.a)
explain extended SELECT t1.a, t1.a in (select t2.a from t2,t3 where t3.a=t2.a) FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL PRIMARY 4 NULL 4 100.00 Using index
-2 MATERIALIZED t2 index a a 5 NULL 3 100.00 Using index
-2 MATERIALIZED t3 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 MATERIALIZED t2 ref a a 5 test.t3.a 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` join `test`.`t3` where `test`.`t3`.`a` = `test`.`t2`.`a` ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`a`)))) AS `t1.a in (select t2.a from t2,t3 where t3.a=t2.a)` from `test`.`t1`
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` join `test`.`t3` where `test`.`t2`.`a` = `test`.`t3`.`a` ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`a`)))) AS `t1.a in (select t2.a from t2,t3 where t3.a=t2.a)` from `test`.`t1`
drop table t1,t2,t3;
# check correct NULL Processing for normal IN/ALL/ANY
# and 2 ways of max/min optimization
@@ -1429,6 +1432,9 @@ drop table if exists t1;
(SELECT 1 as a) UNION (SELECT 1) ORDER BY (SELECT a+0);
a
1
+#
+# IN subselect optimization test
+#
create table t1 (a int not null, b int, primary key (a));
create table t2 (a int not null, primary key (a));
create table t3 (a int not null, b int, primary key (a));
@@ -1443,9 +1449,9 @@ a
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL PRIMARY 4 NULL 4 100.00 Using where; Using index
-2 MATERIALIZED t1 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
+2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,`test`.`t2`.`a` in ( <materialize> (/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` ), <primary_index_lookup>(`test`.`t2`.`a` in <temporary table> on distinct_key where `test`.`t2`.`a` = `<subquery2>`.`a`))))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<primary_index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on PRIMARY))))
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1453,9 +1459,9 @@ a
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL PRIMARY 4 NULL 4 100.00 Using where; Using index
-2 MATERIALIZED t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 100.00 Using where
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,`test`.`t2`.`a` in ( <materialize> (/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where `test`.`t1`.`b` <> 30 ), <primary_index_lookup>(`test`.`t2`.`a` in <temporary table> on distinct_key where `test`.`t2`.`a` = `<subquery2>`.`a`))))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<primary_index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on PRIMARY where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`))))
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
a
2
@@ -1463,10 +1469,10 @@ a
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL PRIMARY 4 NULL 4 100.00 Using where; Using index
-2 MATERIALIZED t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using index
-2 MATERIALIZED t1 ALL PRIMARY NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
+2 DEPENDENT SUBQUERY t1 eq_ref PRIMARY PRIMARY 4 func 1 100.00 Using where
+2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,`test`.`t2`.`a` in ( <materialize> (/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` join `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`a` ), <primary_index_lookup>(`test`.`t2`.`a` in <temporary table> on distinct_key where `test`.`t2`.`a` = `<subquery2>`.`a`))))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` join `test`.`t3` where `test`.`t3`.`a` = `test`.`t1`.`b` and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`)))
drop table t1, t2, t3;
create table t1 (a int, b int, index a (a,b));
create table t2 (a int, index a (a));
@@ -1475,23 +1481,19 @@ insert into t1 values (1,10), (2,20), (3,30), (4,40);
create table t0(a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
insert into t1
-select rand()*100000+200,rand()*100000 from t0 A, t0 B, t0 C, t0 D;
+select rand()*100000+200,rand(1)*100000 from t0 A, t0 B, t0 C, t0 D;
insert into t2 values (2), (3), (4), (5);
insert into t3 values (10,3), (20,4), (30,5);
-select * from t2 where t2.a in (select a from t1);
-a
-2
-3
-4
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a))))
-select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+select * from t2 where t2.a in (select a from t1);
a
2
+3
4
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
@@ -1499,18 +1501,28 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index; Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`))))
-select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
-3
+4
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t3 index a a 5 NULL 3 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t1 ref a a 10 func,test.t3.a 1167 100.00 Using index
+2 DEPENDENT SUBQUERY t1 ref a a 5 func 1001 100.00 Using index
+2 DEPENDENT SUBQUERY t3 index a a 5 NULL 3 33.33 Using where; Using index; Using join buffer (flat, BNL join)
Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` join `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`a` and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`)))
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` join `test`.`t3` where `test`.`t3`.`a` = `test`.`t1`.`b` and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`)))
+select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
+a
+2
+3
insert into t1 values (3,31);
+explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index; Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`))))
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
a
2
@@ -1520,12 +1532,6 @@ select * from t2 where t2.a in (select a from t1 where t1.b <> 30 and t1.b <> 31
a
2
4
-explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 index NULL a 5 NULL 4 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1001 100.00 Using index; Using where
-Warnings:
-Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where <expr_cache><`test`.`t2`.`a`>(<in_optimizer>(`test`.`t2`.`a`,<exists>(<index_lookup>(<cache>(`test`.`t2`.`a`) in t1 on a where `test`.`t1`.`b` <> 30 and <cache>(`test`.`t2`.`a`) = `test`.`t1`.`a`))))
drop table t0, t1, t2, t3;
create table t1 (a int, b int);
create table t2 (a int, b int);
@@ -1587,6 +1593,9 @@ Note 1003 (select 'tttt' AS `s1` from dual)
s1
tttt
drop table t1;
+#
+# IN optimisation test results
+#
create table t1 (s1 char(5), index s1(s1));
create table t2 (s1 char(5), index s1(s1));
insert into t1 values ('a1'),('a2'),('a3');
@@ -1614,27 +1623,27 @@ a3 1
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2) from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 MATERIALIZED t2 index s1 s1 6 NULL 2 100.00 Using index
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Full scan on NULL key
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,`test`.`t1`.`s1` in ( <materialize> (/* select#2 */ select `test`.`t2`.`s1` from `test`.`t2` ), <primary_index_lookup>(`test`.`t1`.`s1` in <temporary table> on distinct_key where `test`.`t1`.`s1` = `<subquery2>`.`s1`)))) AS `s1 NOT IN (SELECT s1 FROM t2)` from `test`.`t1`
+Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 NOT IN (SELECT s1 FROM t2)` from `test`.`t1`
explain extended select s1, s1 = ANY (SELECT s1 FROM t2) from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 MATERIALIZED t2 index s1 s1 6 NULL 2 100.00 Using index
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Full scan on NULL key
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,`test`.`t1`.`s1` in ( <materialize> (/* select#2 */ select `test`.`t2`.`s1` from `test`.`t2` ), <primary_index_lookup>(`test`.`t1`.`s1` in <temporary table> on distinct_key where `test`.`t1`.`s1` = `<subquery2>`.`s1`)))) AS `s1 = ANY (SELECT s1 FROM t2)` from `test`.`t1`
+Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 = ANY (SELECT s1 FROM t2)` from `test`.`t1`
explain extended select s1, s1 <> ALL (SELECT s1 FROM t2) from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 MATERIALIZED t2 index s1 s1 6 NULL 2 100.00 Using index
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Full scan on NULL key
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,`test`.`t1`.`s1` in ( <materialize> (/* select#2 */ select `test`.`t2`.`s1` from `test`.`t2` ), <primary_index_lookup>(`test`.`t1`.`s1` in <temporary table> on distinct_key where `test`.`t1`.`s1` = `<subquery2>`.`s1`)))) AS `s1 <> ALL (SELECT s1 FROM t2)` from `test`.`t1`
+Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 <> ALL (SELECT s1 FROM t2)` from `test`.`t1`
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2') from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 MATERIALIZED t2 range s1 s1 6 NULL 1 100.00 Using where; Using index
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 50.00 Using index; Using where; Full scan on NULL key
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,`test`.`t1`.`s1` in ( <materialize> (/* select#2 */ select `test`.`t2`.`s1` from `test`.`t2` where `test`.`t2`.`s1` < 'a2' ), <primary_index_lookup>(`test`.`t1`.`s1` in <temporary table> on distinct_key where `test`.`t1`.`s1` = `<subquery2>`.`s1`)))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
+Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL where `test`.`t2`.`s1` < 'a2' having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
drop table t1,t2;
create table t2 (a int, b int not null);
create table t3 (a int);
@@ -1887,9 +1896,9 @@ id text
explain extended select * from t1 where id not in (select id from t1 where id < 8);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 12 100.00 Using where
-2 MATERIALIZED t1 range PRIMARY PRIMARY 4 NULL 7 100.00 Using where; Using index
+2 DEPENDENT SUBQUERY t1 unique_subquery PRIMARY PRIMARY 4 func 1 100.00 Using index; Using where
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`text` AS `text` from `test`.`t1` where !<expr_cache><`test`.`t1`.`id`>(<in_optimizer>(`test`.`t1`.`id`,`test`.`t1`.`id` in ( <materialize> (/* select#2 */ select `test`.`t1`.`id` from `test`.`t1` where `test`.`t1`.`id` < 8 ), <primary_index_lookup>(`test`.`t1`.`id` in <temporary table> on distinct_key where `test`.`t1`.`id` = `<subquery2>`.`id`))))
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`text` AS `text` from `test`.`t1` where !<expr_cache><`test`.`t1`.`id`>(<in_optimizer>(`test`.`t1`.`id`,<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`id`) in t1 on PRIMARY where `test`.`t1`.`id` < 8 and <cache>(`test`.`t1`.`id`) = `test`.`t1`.`id`))))
explain extended select * from t1 as tt where not exists (select id from t1 where id < 8 and (id = tt.id or id is null) having id is not null);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY tt ALL NULL NULL NULL NULL 12 100.00 Using where
@@ -2420,19 +2429,22 @@ a
1
3
DROP TABLE t1;
+#
+# SELECT (EXISTS * ...) optimisation
+#
create table t1 (a int, b int);
-insert into t1 values (1,2),(3,4);
-select * from t1 up where exists (select * from t1 where t1.a=up.a);
-a b
-1 2
-3 4
-explain extended select * from t1 up where exists (select * from t1 where t1.a=up.a);
+insert into t1 values (1,2),(3,4),(5,6),(7,8);
+insert into t1 select seq,seq from seq_20_to_40;
+select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
+sum(a+b)
+1296
+explain extended select sum(a+b) from t1 up where exists (select * from t1 where t1.a=up.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY up ALL NULL NULL NULL NULL 2 100.00 Using where
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY up ALL NULL NULL NULL NULL 25 100.00 Using where
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 25 100.00
Warnings:
Note 1276 Field or reference 'test.up.a' of SELECT #2 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`up`.`a` AS `a`,`test`.`up`.`b` AS `b` from `test`.`t1` `up` where <expr_cache><`test`.`up`.`a`>(<in_optimizer>(`test`.`up`.`a`,`test`.`up`.`a` in ( <materialize> (/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where 1 ), <primary_index_lookup>(`test`.`up`.`a` in <temporary table> on distinct_key where `test`.`up`.`a` = `<subquery2>`.`a`))))
+Note 1003 /* select#1 */ select sum(`test`.`up`.`a` + `test`.`up`.`b`) AS `sum(a+b)` from `test`.`t1` `up` where <expr_cache><`test`.`up`.`a`>(<in_optimizer>(`test`.`up`.`a`,`test`.`up`.`a` in ( <materialize> (/* select#2 */ select `test`.`t1`.`a` from `test`.`t1` where 1 ), <primary_index_lookup>(`test`.`up`.`a` in <temporary table> on distinct_key where `test`.`up`.`a` = `<subquery2>`.`a`))))
drop table t1;
CREATE TABLE t1 (t1_a int);
INSERT INTO t1 VALUES (1);
@@ -3099,9 +3111,13 @@ retailerID statusID changed
0048 1 2006-01-06 12:37:50
0059 1 2006-01-06 12:37:50
drop table t1;
+#
+# Bug#21180 Subselect with index for both WHERE and ORDER BY
+# produces empty result
+#
create table t1(a int, primary key (a));
insert into t1 values (10);
-create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
+create table t2 (a int primary key, b varchar(32), c int, unique key cb(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
analyze table t1;
@@ -3114,7 +3130,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using where
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3126,7 +3142,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
+2 SUBQUERY t2 range cb cb 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3177,7 +3193,7 @@ INSERT INTO t2 VALUES (1),(2),(3);
EXPLAIN SELECT a, a IN (SELECT a FROM t1) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 3
-2 MATERIALIZED t1 index a a 5 NULL 5 Using index
+2 SUBQUERY t1 index_subquery a a 5 func 3 Using index; Full scan on NULL key
SELECT a, a IN (SELECT a FROM t1) FROM t2;
a a IN (SELECT a FROM t1)
1 1
@@ -4224,8 +4240,8 @@ INSERT INTO t2 VALUES (7), (5), (1), (3);
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id);
id st
-3 FL
1 GA
+3 FL
7 FL
SELECT id, st FROM t1
WHERE st IN ('GA','FL') AND EXISTS(SELECT 1 FROM t2 WHERE t2.id=t1.id)
@@ -4326,6 +4342,9 @@ SELECT ((a1,a2) IN (SELECT * FROM t2 WHERE b2 > 0)) IS NULL FROM t1;
0
0
DROP TABLE t1, t2;
+#
+# Bug#28076 inconsistent binary/varbinary comparison
+#
CREATE TABLE t1 (s1 BINARY(5), s2 VARBINARY(5));
INSERT INTO t1 VALUES (0x41,0x41), (0x42,0x42), (0x43,0x43);
SELECT s1, s2 FROM t1 WHERE s2 IN (SELECT s1 FROM t1);
@@ -4388,7 +4407,7 @@ CREATE INDEX I2 ON t1 (b);
EXPLAIN SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 1 Using index; Using where
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1);
a b
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(10));
@@ -4398,14 +4417,14 @@ CREATE INDEX I2 ON t2 (b);
EXPLAIN SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t2 index_subquery I1 I1 4 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t2 index_subquery I1 I1 4 func 1 Using index; Using where
SELECT a,b FROM t2 WHERE b IN (SELECT a FROM t2);
a b
EXPLAIN
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery I1 I1 2 func 1 Using index; Using where
SELECT a,b FROM t1 WHERE b IN (SELECT a FROM t1 WHERE LENGTH(a)<500);
a b
DROP TABLE t1,t2;
@@ -4435,10 +4454,13 @@ out_a MIN(b)
1 2
2 4
DROP TABLE t1;
+#
+# Bug#32036 EXISTS within a WHERE clause with a UNION crashes MySQL 5.122
+#
CREATE TABLE t1 (a INT);
CREATE TABLE t2 (a INT);
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
+INSERT INTO t1 VALUES (1),(2),(3),(4);
+INSERT INTO t2 VALUES (1),(2),(1000);
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
2
2
@@ -4446,8 +4468,8 @@ SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select 2 AS `2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`a` from `test`.`t2` where 1 ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`a`))))
@@ -4455,9 +4477,9 @@ EXPLAIN EXTENDED
SELECT 2 FROM t1 WHERE EXISTS ((SELECT 1 FROM t2 WHERE t1.a=t2.a) UNION
(SELECT 1 FROM t2 WHERE t1.a = t2.a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00 Using where
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 3 100.00 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
@@ -4615,7 +4637,7 @@ WHERE outr.int_key NOT IN (SELECT t1.pk FROM t1, t2)
ORDER BY outr.pk;
id select_type table type possible_keys key key_len ref rows Extra
x x outr ALL x x x x x x
-x x t1 index x x x x x x
+x x t1 eq_ref x x x x x x
x x t2 index x x x x x x
# should not crash on debug binaries
SELECT * FROM t2 outr
@@ -5693,7 +5715,8 @@ DROP TABLE IF EXISTS ot1, ot4, it2, it3;
CREATE TABLE t1 (a int) ;
INSERT INTO t1 VALUES (NULL), (1), (NULL), (2);
CREATE TABLE t2 (a int, INDEX idx(a)) ;
-INSERT INTO t2 VALUES (NULL), (1), (NULL);
+INSERT INTO t2 VALUES (NULL), (1), (NULL),(1000);
+insert into t2 select seq from seq_3_to_500;
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX () WHERE t2.a = t1.a);
a
@@ -5703,7 +5726,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 USE INDEX() WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 502
SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
a
@@ -5713,7 +5736,7 @@ SELECT * FROM t1
WHERE EXISTS (SELECT a FROM t2 WHERE t2.a = t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where
-2 MATERIALIZED t2 index idx idx 5 NULL 3 Using index
+2 DEPENDENT SUBQUERY t2 index_subquery idx idx 5 func 58 Using index
DROP TABLE t1,t2;
#
# BUG#752992: Wrong results for a subquery with 'semijoin=on'
@@ -5731,8 +5754,8 @@ SET join_cache_level=0;
EXPLAIN SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-2 MATERIALIZED t2 index NULL PRIMARY 4 NULL 3 Using index
-2 MATERIALIZED it index PRIMARY PRIMARY 4 NULL 3 Using index
+2 DEPENDENT SUBQUERY it eq_ref PRIMARY PRIMARY 4 func 1 Using index
+2 DEPENDENT SUBQUERY t2 index NULL PRIMARY 4 NULL 3 Using index
SELECT * FROM t1 WHERE pk IN (SELECT it.pk FROM t2 JOIN t2 AS it ON 1);
pk i
11 0
@@ -6073,7 +6096,7 @@ WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY ot system NULL NULL NULL NULL 1
-2 DEPENDENT SUBQUERY it1 index_subquery idx_cvk_cik idx_cvk_cik 9 func,const 2 Using index; Using where
+2 DEPENDENT SUBQUERY it1 index_subquery idx_cvk_cik idx_cvk_cik 9 func,const 1 Using index; Using where
SELECT col_int_nokey FROM ot
WHERE col_varchar_nokey IN
(SELECT col_varchar_key FROM it1 WHERE col_int_key IS NULL);
@@ -6629,7 +6652,7 @@ SET @@optimizer_switch='semijoin=off,materialization=off,in_to_exists=on,subquer
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL a 4 NULL 2 Using where; Using index
-2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 2 Using index
+2 DEPENDENT SUBQUERY t1 index_subquery a a 4 func 1 Using index
SELECT * FROM t1 WHERE a IN (SELECT a AS field1 FROM t1 GROUP BY field1);
a
2009-01-01
@@ -6832,7 +6855,7 @@ FROM t1 AS alias1, t1 AS alias2, t1 AS alias3
WHERE alias1.a = alias2.a OR ('Moscow') IN ( SELECT a FROM t1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY alias1 index a a 19 NULL 11 Using where; Using index
-1 PRIMARY alias2 ref a a 19 test.alias1.a 2 Using index
+1 PRIMARY alias2 ref a a 19 test.alias1.a 1 Using index
1 PRIMARY alias3 index NULL a 19 NULL 11 Using index; Using join buffer (flat, BNL join)
2 SUBQUERY t1 index_subquery a a 19 const 1 Using index; Using where
SELECT MAX( alias2.a )
@@ -6983,7 +7006,7 @@ WHERE SLEEP(0.1) OR c < 'p' OR b = ( SELECT MIN(b) FROM t2 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t2 ALL b NULL NULL NULL 2 Using where
-1 PRIMARY t3 ref d d 5 test.t2.b 2 Using index
+1 PRIMARY t3 ref d d 5 test.t2.b 1 Using index
3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
set @tmp_mdev410=@@global.userstat;
set global userstat=on;
@@ -7015,7 +7038,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-435: Expensive subqueries may be evaluated during optimization in merge_key_fields
@@ -7049,7 +7072,7 @@ EXPLAIN SELECT * FROM t1 WHERE EXISTS ( SELECT a FROM t1, t2 WHERE b = a GROUP B
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index
-2 SUBQUERY t2 ref b b 5 test.t1.a 2 Using index
+2 SUBQUERY t2 ref b b 5 test.t1.a 1 Using index
DROP TABLE t1,t2;
#
# MDEV-5991: crash in Item_field::used_tables
@@ -7415,12 +7438,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -7444,12 +7470,15 @@ EXPLAIN
{
"query_block": {
"select_id": 3,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "1 = t3.c"
}
diff --git a/mysql-test/main/subselect_no_semijoin.test b/mysql-test/main/subselect_no_semijoin.test
index 84d312c03c8..83488829448 100644
--- a/mysql-test/main/subselect_no_semijoin.test
+++ b/mysql-test/main/subselect_no_semijoin.test
@@ -24,6 +24,7 @@ INSERT INTO t3 VALUES (4),(5);
SET @tmp19714=@@optimizer_switch;
SET optimizer_switch='subquery_cache=off';
+--source include/explain-no-costs.inc
explain format=json
SELECT ( SELECT b FROM t2 WHERE b = a OR EXISTS ( SELECT c FROM t3 WHERE c = b ) ) FROM t1;
diff --git a/mysql-test/main/subselect_partial_match.result b/mysql-test/main/subselect_partial_match.result
index 52c30492675..16a487445e4 100644
--- a/mysql-test/main/subselect_partial_match.result
+++ b/mysql-test/main/subselect_partial_match.result
@@ -760,20 +760,19 @@ drop table t1,t2;
# LP BUG#601156
#
CREATE TABLE t1 (a1 int DEFAULT NULL, a2 int DEFAULT NULL);
-INSERT INTO t1 VALUES (NULL,2);
-INSERT INTO t1 VALUES (4,NULL);
+INSERT INTO t1 VALUES (NULL,2), (4,NULL),(100,100);
CREATE TABLE t2 (b1 int DEFAULT NULL, b2 int DEFAULT NULL);
-INSERT INTO t2 VALUES (6,NULL);
-INSERT INTO t2 VALUES (NULL,0);
+INSERT INTO t2 VALUES (6,NULL), (NULL,0),(1000,1000);
+insert into t2 select seq,seq from seq_2000_to_2100;
set @@optimizer_switch='materialization=on,semijoin=off,partial_match_rowid_merge=on,partial_match_table_scan=on';
set @tmp_optimizer_switch=@@optimizer_switch;
set optimizer_switch='derived_merge=off,derived_with_keys=off';
EXPLAIN EXTENDED
SELECT * FROM (SELECT * FROM t1 WHERE a1 NOT IN (SELECT b2 FROM t2)) table1;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 100.00
-2 DERIVED t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3 100.00
+2 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 104 100.00
Warnings:
Note 1003 /* select#1 */ select `table1`.`a1` AS `a1`,`table1`.`a2` AS `a2` from (/* select#2 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where !(<in_optimizer>(`test`.`t1`.`a1`,`test`.`t1`.`a1` in ( <materialize> (/* select#3 */ select `test`.`t2`.`b2` from `test`.`t2` ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery3>`.`b2`))))) `table1`
set optimizer_switch=@tmp_optimizer_switch;
@@ -782,12 +781,13 @@ DROP TABLE t1, t2;
# LP BUG#613009 Crash in Ordered_key::get_field_idx
#
set @@optimizer_switch='materialization=on,semijoin=off,partial_match_rowid_merge=on,partial_match_table_scan=off';
-create table t1 (a1 char(3) DEFAULT NULL, a2 char(3) DEFAULT NULL);
-insert into t1 values (NULL, 'a21'), (NULL, 'a22');
+create table t1 (a1 char(4) DEFAULT NULL, a2 char(4) DEFAULT NULL);
+insert into t1 values (NULL, 'a21'), (NULL, 'a22'), ('xxx','xxx');
+insert into t1 select seq,seq from seq_2000_to_2100;
explain select * from t1 where (a1, a2) not in (select a1, a2 from t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 2
+1 PRIMARY t1 ALL NULL NULL NULL NULL 104 Using where
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 104
select * from t1 where (a1, a2) not in (select a1, a2 from t1);
a1 a2
drop table t1;
diff --git a/mysql-test/main/subselect_partial_match.test b/mysql-test/main/subselect_partial_match.test
index fd1e6de716c..9a32ef5491c 100644
--- a/mysql-test/main/subselect_partial_match.test
+++ b/mysql-test/main/subselect_partial_match.test
@@ -2,6 +2,7 @@
# Tests for
# MWL#68: Subquery optimization: Efficient NOT IN execution with NULLs
#
+--source include/have_sequence.inc
set @save_optimizer_switch=@@optimizer_switch;
@@ -614,11 +615,11 @@ drop table t1,t2;
--echo #
CREATE TABLE t1 (a1 int DEFAULT NULL, a2 int DEFAULT NULL);
-INSERT INTO t1 VALUES (NULL,2);
-INSERT INTO t1 VALUES (4,NULL);
+INSERT INTO t1 VALUES (NULL,2), (4,NULL),(100,100);
CREATE TABLE t2 (b1 int DEFAULT NULL, b2 int DEFAULT NULL);
-INSERT INTO t2 VALUES (6,NULL);
-INSERT INTO t2 VALUES (NULL,0);
+INSERT INTO t2 VALUES (6,NULL), (NULL,0),(1000,1000);
+
+insert into t2 select seq,seq from seq_2000_to_2100;
set @@optimizer_switch='materialization=on,semijoin=off,partial_match_rowid_merge=on,partial_match_table_scan=on';
@@ -636,8 +637,9 @@ DROP TABLE t1, t2;
set @@optimizer_switch='materialization=on,semijoin=off,partial_match_rowid_merge=on,partial_match_table_scan=off';
-create table t1 (a1 char(3) DEFAULT NULL, a2 char(3) DEFAULT NULL);
-insert into t1 values (NULL, 'a21'), (NULL, 'a22');
+create table t1 (a1 char(4) DEFAULT NULL, a2 char(4) DEFAULT NULL);
+insert into t1 values (NULL, 'a21'), (NULL, 'a22'), ('xxx','xxx');
+insert into t1 select seq,seq from seq_2000_to_2100;
explain select * from t1 where (a1, a2) not in (select a1, a2 from t1);
select * from t1 where (a1, a2) not in (select a1, a2 from t1);
drop table t1;
diff --git a/mysql-test/main/subselect_sj.result b/mysql-test/main/subselect_sj.result
index b69471edce3..054c71351d0 100644
--- a/mysql-test/main/subselect_sj.result
+++ b/mysql-test/main/subselect_sj.result
@@ -76,24 +76,24 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t10` join `test`.`t12` join `test`.`t1` where `test`.`t12`.`pk` = `test`.`t10`.`a` and `test`.`t10`.`pk` = `test`.`t1`.`a`
subqueries within outer joins go into ON expr.
-explAin extended
+explain extended
select * from t1 left join (t2 A, t2 B) on ( A.A= t1.A And B.A in (select pk from t10));
-id select_type tABle type possiBle_keys key key_len ref rows filtered ExtrA
+id select_type table type possible_keys key key_len ref rows filtered ExtrA
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY A ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY B ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t10 index PRIMARY PRIMARY 4 NULL 10 100.00 Using index
+2 DEPENDENT SUBQUERY t10 unique_suBquery PRIMARY PRIMARY 4 func 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`A`.`A` AS `A`,`test`.`A`.`B` AS `B`,`test`.`B`.`A` AS `A`,`test`.`B`.`B` AS `B` from `test`.`t1` left join (`test`.`t2` `A` join `test`.`t2` `B`) on(`test`.`A`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`B`.`A`,`test`.`B`.`A` in ( <mAteriAlize> (/* select#2 */ select `test`.`t10`.`pk` from `test`.`t10` ), <primAry_index_lookup>(`test`.`B`.`A` in <temporAry tABle> on distinct_key where `test`.`B`.`A` = `<suBquery2>`.`pk`)))) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`A`.`A` AS `A`,`test`.`A`.`B` AS `B`,`test`.`B`.`A` AS `A`,`test`.`B`.`B` AS `B` from `test`.`t1` left join (`test`.`t2` `A` join `test`.`t2` `B`) on(`test`.`A`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`B`.`A`,<exists>(<primAry_index_lookup>(<cAche>(`test`.`B`.`A`) in t10 on PRIMARY)))) where 1
t2 should be wrapped into OJ-nest, so we have "t1 LJ (t2 J t10)"
-explAin extended
+explain extended
select * from t1 left join t2 on (t2.A= t1.A And t2.A in (select pk from t10));
-id select_type tABle type possiBle_keys key key_len ref rows filtered ExtrA
+id select_type table type possible_keys key key_len ref rows filtered ExtrA
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t10 index PRIMARY PRIMARY 4 NULL 10 100.00 Using index
+2 DEPENDENT SUBQUERY t10 unique_suBquery PRIMARY PRIMARY 4 func 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`t2`.`A` AS `A`,`test`.`t2`.`B` AS `B` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`t1`.`A`,`test`.`t1`.`A` in ( <mAteriAlize> (/* select#2 */ select `test`.`t10`.`pk` from `test`.`t10` ), <primAry_index_lookup>(`test`.`t1`.`A` in <temporAry tABle> on distinct_key where `test`.`t1`.`A` = `<suBquery2>`.`pk`)))) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`t2`.`A` AS `A`,`test`.`t2`.`B` AS `B` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`t1`.`A`,<exists>(<primAry_index_lookup>(<cAche>(`test`.`t2`.`A`) in t10 on PRIMARY)))) where 1
set @save_join_buffer_size=@@join_buffer_size;
set join_buffer_size=8*1024;
we shouldn't flatten if we're going to get a join of > MAX_TABLES.
@@ -160,26 +160,26 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY s47 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 PRIMARY s48 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
1 PRIMARY s49 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m00 ALL NULL NULL NULL NULL 3 Using where
-2 DEPENDENT SUBQUERY m01 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m02 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m03 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m04 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m05 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m06 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m07 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m08 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m09 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m10 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m11 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m12 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m13 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m14 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m15 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m16 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m17 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m18 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m19 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m00 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED m01 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m02 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m03 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m04 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m05 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m06 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m07 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m08 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m09 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m10 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m11 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m12 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m13 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m14 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m15 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m16 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m17 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m18 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m19 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
select * from
t1 left join t2 on (t2.a= t1.a and t2.a in (select pk from t10))
where t1.a < 5;
@@ -344,8 +344,8 @@ WHERE PNUM IN
(SELECT PNUM FROM PROJ));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY STAFF ALL NULL NULL NULL NULL 5
-1 PRIMARY PROJ ALL NULL NULL NULL NULL 6
-1 PRIMARY WORKS ALL NULL NULL NULL NULL 12 Using where; FirstMatch(STAFF)
+1 PRIMARY PROJ ALL NULL NULL NULL NULL 6 Start temporary
+1 PRIMARY WORKS ALL NULL NULL NULL NULL 12 Using where; End temporary
SELECT EMPNUM, EMPNAME
FROM STAFF
WHERE EMPNUM IN
@@ -502,7 +502,7 @@ EXPLAIN EXTENDED SELECT vkey FROM t0 WHERE pk IN
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t0 ALL PRIMARY NULL NULL NULL 5 100.00
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t0.pk 1 100.00 Using where
-1 PRIMARY t2 ref vkey vkey 4 test.t1.vnokey 2 100.00 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref vkey vkey 4 test.t1.vnokey 1 100.00 Using index; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t0`.`vkey` AS `vkey` from `test`.`t0` `t1` semi join (`test`.`t0` `t2`) join `test`.`t0` where `test`.`t1`.`pk` = `test`.`t0`.`pk` and `test`.`t2`.`vkey` = `test`.`t1`.`vnokey`
SELECT vkey FROM t0 WHERE pk IN
@@ -763,16 +763,16 @@ explain extended
select a from t1
where a in (select c from t2 where d >= some(select e from t3 where b=e));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 7 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 6 100.00 Using where; FirstMatch(t1)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 6 100.00 Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 16.67 Using where; End temporary; Using join buffer (flat, BNL join)
3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.b' of SELECT #3 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`c` = `test`.`t1`.`a` and <nop>(<expr_cache><`test`.`t2`.`d`,`test`.`t1`.`b`>(<in_optimizer>(`test`.`t2`.`d`,<exists>(/* select#3 */ select `test`.`t3`.`e` from `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`e` and <cache>(`test`.`t2`.`d`) >= `test`.`t3`.`e`))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t1`.`a` = `test`.`t2`.`c` and <nop>(<expr_cache><`test`.`t2`.`d`,`test`.`t1`.`b`>(<in_optimizer>(`test`.`t2`.`d`,<exists>(/* select#3 */ select `test`.`t3`.`e` from `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`e` and <cache>(`test`.`t2`.`d`) >= `test`.`t3`.`e`))))
show warnings;
Level Code Message
Note 1276 Field or reference 'test.t1.b' of SELECT #3 was resolved in SELECT #1
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`c` = `test`.`t1`.`a` and <nop>(<expr_cache><`test`.`t2`.`d`,`test`.`t1`.`b`>(<in_optimizer>(`test`.`t2`.`d`,<exists>(/* select#3 */ select `test`.`t3`.`e` from `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`e` and <cache>(`test`.`t2`.`d`) >= `test`.`t3`.`e`))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t1`.`a` = `test`.`t2`.`c` and <nop>(<expr_cache><`test`.`t2`.`d`,`test`.`t1`.`b`>(<in_optimizer>(`test`.`t2`.`d`,<exists>(/* select#3 */ select `test`.`t3`.`e` from `test`.`t3` where `test`.`t1`.`b` = `test`.`t3`.`e` and <cache>(`test`.`t2`.`d`) >= `test`.`t3`.`e`))))
select a from t1
where a in (select c from t2 where d >= some(select e from t3 where b=e));
a
@@ -802,20 +802,20 @@ PRIMARY KEY (pk)
INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo','ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')), (2,'f','ffff','ffff','ffff', 'ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'));
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii','iiii','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')), (2,'f','ffff','ffff','ffff','ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'));
+insert into t2 (pk) values (-1),(0);
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (a, b) IN (SELECT a, b FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 11 func,func 1 100.00
-2 MATERIALIZED t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
-Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`pk` > 0
+Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (a, b) IN (SELECT a, b FROM t2 WHERE pk > 0);
pk
2
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, c) IN (SELECT b, c FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`c` = `test`.`t1`.`c` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, c) IN (SELECT b, c FROM t2 WHERE pk > 0);
@@ -825,7 +825,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, d) IN (SELECT b, d FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`d` = `test`.`t1`.`d` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, d) IN (SELECT b, d FROM t2 WHERE pk > 0);
@@ -834,7 +834,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, e) IN (SELECT b, e FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`e` = `test`.`t1`.`e` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, e) IN (SELECT b, e FROM t2 WHERE pk > 0);
@@ -844,7 +844,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, f) IN (SELECT b, f FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`f` = `test`.`t1`.`f` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, f) IN (SELECT b, f FROM t2 WHERE pk > 0);
@@ -854,7 +854,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, g) IN (SELECT b, g FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`g` = `test`.`t1`.`g` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, g) IN (SELECT b, g FROM t2 WHERE pk > 0);
@@ -864,7 +864,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, h) IN (SELECT b, h FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`h` = `test`.`t1`.`h` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, h) IN (SELECT b, h FROM t2 WHERE pk > 0);
@@ -874,7 +874,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, i) IN (SELECT b, i FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`i` = `test`.`t1`.`i` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, i) IN (SELECT b, i FROM t2 WHERE pk > 0);
@@ -884,7 +884,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, j) IN (SELECT b, j FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`j` = `test`.`t1`.`j` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, j) IN (SELECT b, j FROM t2 WHERE pk > 0);
@@ -894,7 +894,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, k) IN (SELECT b, k FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`k` = `test`.`t1`.`k` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, k) IN (SELECT b, k FROM t2 WHERE pk > 0);
@@ -1249,8 +1249,8 @@ INSERT INTO t2 VALUES (1, 0), (1, 1), (2, 0), (2, 1);
EXPLAIN
SELECT * FROM t1 WHERE (i) IN (SELECT i FROM t2 where j > 0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index k k 10 NULL 4 Using where; Using index; Start temporary
-1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ref k k 5 test.t1.i 1 Using where; Using index; Start temporary; End temporary
SELECT * FROM t1 WHERE (i) IN (SELECT i FROM t2 where j > 0);
i
1
@@ -1612,7 +1612,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY A index PRIMARY PRIMARY 4 NULL 3 Using index
1 PRIMARY C eq_ref PRIMARY PRIMARY 4 test.A.t1field 1 Using index
1 PRIMARY D eq_ref PRIMARY PRIMARY 4 test.A.t1field 1 Using index
-1 PRIMARY B index NULL PRIMARY 4 NULL 3 Using index; Start temporary; End temporary
+1 PRIMARY B index NULL PRIMARY 4 NULL 3 Using index; FirstMatch(D)
SELECT * FROM t1 A
WHERE
A.t1field IN (SELECT A.t1field FROM t2 B) AND
@@ -1626,7 +1626,7 @@ drop table t1,t2;
# BUG#787299: Valgrind complains on a join query with two IN subqueries
#
create table t1 (a int);
-insert into t1 values (1), (2), (3);
+insert into t1 values (1), (2), (3),(1000),(2000);
create table t2 as select * from t1;
select * from t1 A, t1 B
where A.a = B.a and A.a in (select a from t2 C) and B.a in (select a from t2 D);
@@ -1634,16 +1634,18 @@ a a
1 1
2 2
3 3
+1000 1000
+2000 2000
explain
select * from t1 A, t1 B
where A.a = B.a and A.a in (select a from t2 C) and B.a in (select a from t2 D);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY A ALL NULL NULL NULL NULL 3
+1 PRIMARY A ALL NULL NULL NULL NULL 5
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY B ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY B ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED C ALL NULL NULL NULL NULL 3
-3 MATERIALIZED D ALL NULL NULL NULL NULL 3
+2 MATERIALIZED C ALL NULL NULL NULL NULL 5
+3 MATERIALIZED D ALL NULL NULL NULL NULL 5
drop table t1, t2;
#
# BUG#784441: Abort on semijoin with a view as the inner table
@@ -1980,7 +1982,7 @@ f1 f3 f4 f2 f4
DROP TABLE t1,t2,t3;
#
# BUG#803457: Wrong result with semijoin + view + outer join in maria-5.3-subqueries-mwl90
-# (Original testcase)
+# (Original, slightly modified testcase)
#
CREATE TABLE t1 (f1 int, f2 int );
INSERT INTO t1 VALUES (2,0),(4,0),(0,NULL);
@@ -1990,24 +1992,22 @@ CREATE TABLE t3 ( f1 int, f3 int );
INSERT INTO t3 VALUES (2,0),(4,0),(0,NULL),(4,0),(8,0);
CREATE TABLE t4 ( f2 int, KEY (f2) );
INSERT INTO t4 VALUES (0),(NULL);
-CREATE VIEW v4 AS SELECT DISTINCT f2 FROM t4 ;
+INSERT INTO t4 VALUES (0),(NULL),(-1),(-2),(-3);
# The following must not have outer joins:
explain extended
-SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4);
+SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4 where f2 = 0 or f2 IS NULL);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t4 ref_or_null f2 f2 5 const 4 25.00 Using where; Using index; FirstMatch(t2)
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 5 100.00 Using where; Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 index f2 f2 5 NULL 2 100.00 Using index
Warnings:
-Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f2` AS `f2`,`test`.`t2`.`f3` AS `f3`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` semi join (`test`.`t4`) join `test`.`t2` join `test`.`t3` where `test`.`t3`.`f1` = `test`.`t1`.`f1` and `test`.`t1`.`f2` = `test`.`t2`.`f2`
-SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4);
+Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f2` AS `f2`,`test`.`t2`.`f3` AS `f3`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` semi join (`test`.`t4`) join `test`.`t2` join `test`.`t3` where `test`.`t4`.`f2` = `test`.`t2`.`f3` and `test`.`t3`.`f1` = `test`.`t1`.`f1` and `test`.`t1`.`f2` = `test`.`t2`.`f2` and (`test`.`t2`.`f3` = 0 or `test`.`t2`.`f3` is null)
+SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4 where f2 = 0 or f2 IS NULL);
f1 f2 f3 f3
2 0 0 0
4 0 0 0
4 0 0 0
-drop view v4;
drop table t1, t2, t3, t4;
#
# BUG#803303: Wrong result with semijoin=on, outer join in maria-5.3-subqueries-mwl90
@@ -2153,9 +2153,9 @@ INSERT INTO t3 VALUES (6,5),(6,2),(8,0),(9,1),(6,5);
explain
SELECT * FROM t1, t2 WHERE (t2.a , t1.b) IN (SELECT a, b FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 2 Using index; Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL b NULL NULL NULL 5 Using where; Start temporary; End temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t3 ref b b 5 test.t1.b 1 Using where; Start temporary
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t3.a 1 Using index; End temporary
SELECT * FROM t1, t2 WHERE (t2.a , t1.b) IN (SELECT a, b FROM t3);
b a
5 6
@@ -2178,10 +2178,10 @@ INSERT INTO t5 VALUES (7,0),(9,0);
explain
SELECT * FROM t3 WHERE t3.a IN (SELECT t5.a FROM t2, t4, t5 WHERE t2.c = t5.a AND t2.b = t5.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t5 index a a 10 NULL 2 Using where; Using index; LooseScan
-1 PRIMARY t2 ref b b 5 test.t5.b 2 Using where
-1 PRIMARY t4 ALL NULL NULL NULL NULL 3 FirstMatch(t5)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t5 index a a 10 NULL 2 Using where; Using index; Start temporary
+1 PRIMARY t2 ref b b 5 test.t5.b 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3
+1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; End temporary; Using join buffer (flat, BNL join)
SELECT * FROM t3 WHERE t3.a IN (SELECT t5.a FROM t2, t4, t5 WHERE t2.c = t5.a AND t2.b = t5.b);
a
0
@@ -2260,10 +2260,10 @@ alias1.c IN (SELECT SQ3_alias1.b
FROM t2 AS SQ3_alias1 STRAIGHT_JOIN t2 AS SQ3_alias2)
LIMIT 100;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 20
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL 20
1 PRIMARY t2 ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join)
-1 PRIMARY SQ3_alias1 ALL NULL NULL NULL NULL 20 Using where; Start temporary
+1 PRIMARY SQ3_alias1 ALL NULL NULL NULL NULL 20 Start temporary
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join)
1 PRIMARY SQ3_alias2 index NULL PRIMARY 4 NULL 20 Using index; End temporary
2 DERIVED t2 ALL NULL NULL NULL NULL 20
create table t3 as
@@ -2436,9 +2436,9 @@ SET SESSION optimizer_switch='loosescan=off';
EXPLAIN
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index idx idx 9 NULL 2 Using where; Using index; Start temporary
-1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+1 PRIMARY t2 range idx idx 4 NULL 2 Using where; Using index
+1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
a
5
@@ -2446,9 +2446,9 @@ SET SESSION optimizer_switch='loosescan=on';
EXPLAIN
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index idx idx 9 NULL 2 Using where; Using index; Start temporary
-1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+1 PRIMARY t2 range idx idx 4 NULL 2 Using where; Using index
+1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
a
5
@@ -2499,10 +2499,9 @@ SELECT * FROM t1, t2
WHERE t1.a = t2.a AND t2.a IN (SELECT b FROM t3 STRAIGHT_JOIN t4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 1
1 PRIMARY t1 ref a a 5 const 1 Using index
1 PRIMARY t2 ref a a 5 func 1 Using index
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 0
+1 PRIMARY t4 ALL NULL NULL NULL NULL 0 FirstMatch(t2); Using join buffer (flat, BNL join)
SELECT * FROM t1, t2
WHERE t1.a = t2.a AND t2.a IN (SELECT b FROM t3 STRAIGHT_JOIN t4);
a a
@@ -2562,7 +2561,7 @@ INSERT INTO t1 VALUES
(6,3),(7,1),(8,4),(9,3),(10,2);
CREATE TABLE t2 ( c INT, d INT, KEY(c) );
INSERT INTO t2 VALUES
-(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
+(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1),(11,11);
analyze table t1,t2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
@@ -2572,35 +2571,35 @@ test.t2 analyze status OK
explain
SELECT a, b, d FROM t1, t2
WHERE ( b, d ) IN
-( SELECT b, d FROM t1, t2 WHERE b = c );
+( SELECT b, d FROM t1 as t3, t2 as t4 WHERE b = c );
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 7
-1 PRIMARY t1 index b b 5 NULL 10 Using where; Using index; LooseScan
-1 PRIMARY t2 ref c c 5 test.t1.b 1 Using where; FirstMatch(t1)
-1 PRIMARY t1 ref b b 5 test.t1.b 2
+1 PRIMARY t3 index b b 5 NULL 10 Using where; Using index; Start temporary
+1 PRIMARY t4 ref c c 5 test.t3.b 1
+1 PRIMARY t1 ALL b NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 8 Using where; End temporary; Using join buffer (flat, BNL join)
SELECT a, b, d FROM t1, t2
WHERE ( b, d ) IN
-( SELECT b, d FROM t1, t2 WHERE b = c );
+( SELECT b, d FROM t1 as t3, t2 as t4 WHERE b = c );
a b d
-2 1 2
-7 1 2
-8 4 2
1 2 1
-4 2 1
+1 2 1
10 2 1
+10 2 1
+2 1 2
+2 1 2
3 3 3
+3 3 3
+4 2 1
+4 2 1
+5 5 5
6 3 3
-9 3 3
-2 1 2
+6 3 3
+7 1 2
7 1 2
8 4 2
-5 5 5
-3 3 3
-6 3 3
+8 4 2
+9 3 3
9 3 3
-1 2 1
-4 2 1
-10 2 1
DROP TABLE t1, t2;
# Another testcase for the above that still uses LooseScan:
create table t0(a int primary key);
@@ -2769,21 +2768,21 @@ WHERE (t1_1.a, t1_2.a) IN ( SELECT a, b FROM v1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_1 ALL NULL NULL NULL NULL 11 Using where
1 PRIMARY t1_2 ALL NULL NULL NULL NULL 11
-1 PRIMARY <derived3> ref key0 key0 5 test.t1_1.a 2 Using where; FirstMatch(t1_2)
+1 PRIMARY <derived3> ref key0 key0 5 test.t1_1.a 1 Using where; FirstMatch(t1_2)
3 DERIVED t1 ALL NULL NULL NULL NULL 11
SELECT * FROM t1 AS t1_1, t1 AS t1_2
WHERE (t1_1.a, t1_2.a) IN ( SELECT a, b FROM v1 );
a b a b
-3 1 9 1
-5 8 4 0
-3 9 9 1
2 4 4 0
2 4 6 8
2 6 4 0
2 6 6 8
+3 1 9 1
+3 9 9 1
5 4 4 0
-7 7 7 7
5 4 4 0
+5 8 4 0
+7 7 7 7
DROP VIEW v1;
DROP TABLE t1;
set @@join_cache_level= @tmp_jcl_978479;
@@ -2927,9 +2926,9 @@ alias2.col_int_key = alias1.col_int_key
WHERE alias1.pk = 58 OR alias1.col_varchar_key = 'o'
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 index_merge PRIMARY,col_int_key,col_varchar_key PRIMARY,col_varchar_key 4,4 NULL 2 Using sort_union(PRIMARY,col_varchar_key); Using where; Start temporary
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY alias2 ALL col_int_key NULL NULL NULL 12 Range checked for each record (index map: 0x2); End temporary
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2
+1 PRIMARY alias1 index_merge PRIMARY,col_int_key,col_varchar_key PRIMARY,col_varchar_key 4,4 NULL 2 Using sort_union(PRIMARY,col_varchar_key); Using where
+1 PRIMARY alias2 ALL col_int_key NULL NULL NULL 12 Range checked for each record (index map: 0x2); FirstMatch(t2)
SELECT *
FROM t2
WHERE (field1) IN (SELECT alias1.col_varchar_nokey AS field1
@@ -3037,7 +3036,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1 100.00 Using temporary; Using filesort
1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 5 100.00 Start temporary
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 100.00
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using where; End temporary
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 10.00 Using where; End temporary
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1`,'x' AS `c2` from `test`.`t1` semi join (`test`.`t1` left join `test`.`t3` on(`test`.`t1`.`c1` = `test`.`t3`.`c3`)) where `test`.`t1`.`pk` = `test`.`t1`.`pk` order by 'x',`test`.`t1`.`c1`
DROP TABLE t1,t2,t3;
@@ -3289,8 +3288,7 @@ explain extended
SELECT Id FROM t1 WHERE Id in (SELECT t1_Id FROM t2 WHERE t2.col1 IS NULL);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ref col1 col1 5 const 2 100.00 Using index condition; Using where
+1 PRIMARY t2 ref col1 col1 5 const 2 50.00 Using index condition; Using where; FirstMatch(t1)
Warnings:
Note 1003 select 1 AS `Id` from (`test`.`t2`) where `test`.`t2`.`t1_Id` = 1 and `test`.`t2`.`col1` is null
DROP TABLE t1, t2;
diff --git a/mysql-test/main/subselect_sj.test b/mysql-test/main/subselect_sj.test
index e4d02ed666c..3c046c6321c 100644
--- a/mysql-test/main/subselect_sj.test
+++ b/mysql-test/main/subselect_sj.test
@@ -70,13 +70,13 @@ explain extended select * from t1 where a in (select t10.pk from t10, t12 where
--echo subqueries within outer joins go into ON expr.
# TODO: psergey: check if case conversions like those are ok (it broke on windows)
---replace_result a A b B
+--replace_result a A b B explain explain table table possible possible
explain extended
select * from t1 left join (t2 A, t2 B) on ( A.a= t1.a and B.a in (select pk from t10));
# TODO: psergey: check if case conversions like those are ok (it broke on windows)
--echo t2 should be wrapped into OJ-nest, so we have "t1 LJ (t2 J t10)"
---replace_result a A b B
+--replace_result a A b B explain explain table table possible possible
explain extended
select * from t1 left join t2 on (t2.a= t1.a and t2.a in (select pk from t10));
@@ -739,6 +739,7 @@ INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo','ffff','ffff','ffff','ffff','f
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii','iiii','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')), (2,'f','ffff','ffff','ffff','ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'));
+insert into t2 (pk) values (-1),(0);
# Test that materialization is skipped for semijoins where materialized
# table would contain GEOMETRY or different kinds of BLOB/TEXT columns
@@ -1425,7 +1426,7 @@ drop table t1,t2;
--echo # BUG#787299: Valgrind complains on a join query with two IN subqueries
--echo #
create table t1 (a int);
-insert into t1 values (1), (2), (3);
+insert into t1 values (1), (2), (3),(1000),(2000);
create table t2 as select * from t1;
select * from t1 A, t1 B
where A.a = B.a and A.a in (select a from t2 C) and B.a in (select a from t2 D);
@@ -1739,7 +1740,7 @@ DROP TABLE t1,t2,t3;
--echo #
--echo # BUG#803457: Wrong result with semijoin + view + outer join in maria-5.3-subqueries-mwl90
---echo # (Original testcase)
+--echo # (Original, slightly modified testcase)
--echo #
CREATE TABLE t1 (f1 int, f2 int );
@@ -1753,15 +1754,13 @@ INSERT INTO t3 VALUES (2,0),(4,0),(0,NULL),(4,0),(8,0);
CREATE TABLE t4 ( f2 int, KEY (f2) );
INSERT INTO t4 VALUES (0),(NULL);
-
-CREATE VIEW v4 AS SELECT DISTINCT f2 FROM t4 ;
+INSERT INTO t4 VALUES (0),(NULL),(-1),(-2),(-3);
--echo # The following must not have outer joins:
explain extended
-SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4);
-SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4);
+SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4 where f2 = 0 or f2 IS NULL);
+SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4 where f2 = 0 or f2 IS NULL);
-drop view v4;
drop table t1, t2, t3, t4;
--echo #
@@ -2249,6 +2248,7 @@ INSERT INTO t1 VALUES
CREATE TABLE t2 ( a INT, b INT, KEY(a)) ENGINE=MyISAM;
INSERT INTO t2 VALUES (3,20),(2,21),(3,22);
+--sorted_result
SELECT *
FROM t1 AS alias1, t1 AS alias2
WHERE ( alias1.c, alias2.c )
@@ -2292,16 +2292,17 @@ INSERT INTO t1 VALUES
CREATE TABLE t2 ( c INT, d INT, KEY(c) );
INSERT INTO t2 VALUES
- (1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
+ (1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1),(11,11);
analyze table t1,t2;
explain
SELECT a, b, d FROM t1, t2
WHERE ( b, d ) IN
- ( SELECT b, d FROM t1, t2 WHERE b = c );
+ ( SELECT b, d FROM t1 as t3, t2 as t4 WHERE b = c );
+--sorted_result
SELECT a, b, d FROM t1, t2
WHERE ( b, d ) IN
- ( SELECT b, d FROM t1, t2 WHERE b = c );
+ ( SELECT b, d FROM t1 as t3, t2 as t4 WHERE b = c );
DROP TABLE t1, t2;
@@ -2449,6 +2450,7 @@ EXPLAIN
SELECT * FROM t1 AS t1_1, t1 AS t1_2
WHERE (t1_1.a, t1_2.a) IN ( SELECT a, b FROM v1 );
+--sorted_result
SELECT * FROM t1 AS t1_1, t1 AS t1_2
WHERE (t1_1.a, t1_2.a) IN ( SELECT a, b FROM v1 );
diff --git a/mysql-test/main/subselect_sj2.result b/mysql-test/main/subselect_sj2.result
index 6643aa13f83..fa10a4aa066 100644
--- a/mysql-test/main/subselect_sj2.result
+++ b/mysql-test/main/subselect_sj2.result
@@ -25,11 +25,7 @@ key(b)
);
insert into t2 select a, a/2 from t0;
insert into t2 select a+10, a+10/2 from t0;
-select * from t1;
-a b
-1 1
-1 1
-2 2
+insert into t1 values (1030,30),(1031,31),(1032,32),(1033,33);
select * from t2;
a b
0 0
@@ -56,7 +52,7 @@ explain select * from t2 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL b NULL NULL NULL 20
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 7
select * from t2 where b in (select a from t1);
a b
1 1
@@ -84,7 +80,7 @@ explain select * from t3 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL b NULL NULL NULL 20
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 7
select * from t3 where b in (select a from t1);
a b pk1 pk2 pk3
1 1 1 1 1
@@ -106,11 +102,14 @@ primary key(pk1, pk2)
insert into t3 select
A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a
from t0 A, t0 B where B.a <5;
+analyze table t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
explain select * from t3 where b in (select a from t0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ALL b NULL NULL NULL #
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func #
-2 MATERIALIZED t0 ALL NULL NULL NULL NULL #
+1 PRIMARY t0 ALL NULL NULL NULL NULL # Using where; Start temporary
+1 PRIMARY t3 ref b b 5 test.t0.a # End temporary
select * from t3 where b in (select A.a+B.a from t0 A, t0 B where B.a<5);
a b pk1 pk2
0 0 0 0
@@ -131,20 +130,17 @@ set join_buffer_size= @save_join_buffer_size;
set max_heap_table_size= @save_max_heap_table_size;
explain select * from t1 where a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 index b b 5 NULL 20 Using index
-select * from t1;
-a b
-1 1
-1 1
-2 2
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref b b 5 test.t1.a 1 Using index; FirstMatch(t1)
select * from t1 where a in (select b from t2);
a b
1 1
1 1
2 2
drop table t1, t2, t3;
+#
+# Test join buffering
+#
set @save_join_buffer_size = @@join_buffer_size;
set join_buffer_size= 8192;
create table t1 (a int, filler1 binary(200), filler2 binary(200));
@@ -628,7 +624,7 @@ select * from t1 left join t2 on (t2.a= t1.a and t2.a in (select pk from t3));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where
-2 MATERIALIZED t3 index PRIMARY PRIMARY 4 NULL 10 Using index
+2 DEPENDENT SUBQUERY t3 unique_subquery PRIMARY PRIMARY 4 func 1 Using index
drop table t0, t1, t2, t3;
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -740,9 +736,8 @@ alter table t3 add primary key(id), add key(a);
The following must use loose index scan over t3, key a:
explain select count(a) from t2 where a in ( SELECT a FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index a a 5 NULL 1000 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t3 index a a 5 NULL 30000 Using index
+1 PRIMARY t2 index a a 5 NULL 1000 Using where; Using index
+1 PRIMARY t3 ref a a 5 test.t2.a 30 Using index; FirstMatch(t2)
select count(a) from t2 where a in ( SELECT a FROM t3);
count(a)
1000
@@ -770,8 +765,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 1
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 FirstMatch(t2)
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 Using where
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 1
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where; FirstMatch(t3)
select 1 from t2 where
c2 in (select 1 from t3, t2) and
c1 in (select convert(c6,char(1)) from t2);
@@ -839,9 +833,9 @@ explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 const PRIMARY PRIMARY 4 const # Using index
+1 PRIMARY alias1 const PRIMARY PRIMARY 4 const #
1 PRIMARY alias2 index f12 f12 7 NULL # Using index; LooseScan
-1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index; FirstMatch(alias2)
+1 PRIMARY t1 ALL NULL NULL NULL NULL # FirstMatch(alias2)
1 PRIMARY t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
@@ -911,9 +905,9 @@ SELECT * FROM t3 LEFT JOIN (v1,t2) ON t3.a = t2.a
WHERE t3.b IN (SELECT b FROM t4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
1 PRIMARY <derived3> ALL NULL NULL NULL NULL 2
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t4 ALL NULL NULL NULL NULL 2
3 DERIVED t1 ALL NULL NULL NULL NULL 1
SELECT * FROM t3 LEFT JOIN (v1,t2) ON t3.a = t2.a
@@ -950,33 +944,43 @@ INSERT INTO t1 VALUES
(11,'z',8),(12,'c',7),(13,'a',6),(14,'q',5),(15,'y',4),
(16,'n',3),(17,'r',2),(18,'v',1),(19,'p',0);
CREATE TABLE t2 (
-pk INT, d VARCHAR(1), e INT,
+pk INT, d VARCHAR(1), e INT, f int,
PRIMARY KEY(pk), KEY(d,e)
) ENGINE=InnoDB;
-INSERT INTO t2 VALUES
+INSERT INTO t2 (pk,d,e) VALUES
(1,'x',1),(2,'d',2),(3,'r',3),(4,'f',4),(5,'y',5),
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+update t2 set f=pk/2;
analyze table t1,t2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+# Original query, changed because of new optimizations
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
WHERE a = d AND ( pk < 2 OR d = 'z' )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY t2 index PRIMARY,d d 9 NULL 17 Using where; Using index; LooseScan
+1 PRIMARY t1 ref a a 5 test.t2.d 1 Using where; Using index; FirstMatch(t2)
1 PRIMARY t1 ref b b 4 test.t2.d 1
-2 MATERIALIZED t2 index_merge PRIMARY,d d,PRIMARY 4,4 NULL 2 Using sort_union(d,PRIMARY); Using where
-2 MATERIALIZED t1 ref a a 5 test.t2.d 1 Using where; Using index
+explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
-WHERE a = d AND ( pk < 2 OR d = 'z' )
+WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
+);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 index_merge PRIMARY,d d,PRIMARY 4,4 NULL 2 Using sort_union(d,PRIMARY); Using where; Start temporary
+1 PRIMARY t1 ref a a 5 test.t2.d 1 Using where; Using index
+1 PRIMARY t1 ref b b 4 test.t2.d 1 End temporary
+SELECT * FROM t1 WHERE b IN (
+SELECT d FROM t2, t1
+WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
);
a b c
Warnings:
@@ -1224,10 +1228,10 @@ t1_pk1 t1_pk2 t3_i t3_c
explain
SELECT * FROM t1, t3 WHERE t3_c IN ( SELECT t1_pk2 FROM t4, t2 WHERE t2_c = t1_pk2 AND t2_i >= t3_i ) AND ( t1_pk1 = 'POL' );
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ref PRIMARY PRIMARY 5 const 1 Using where; Using index
-1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Start temporary
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t4 index NULL PRIMARY 59 NULL 2 Using where; Using index; End temporary
+1 PRIMARY t1 ref PRIMARY PRIMARY 5 const 1 Using where
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t3)
DROP TABLE t1,t2,t3,t4;
#
# MDEV-6263: Wrong result when using IN subquery with order by
@@ -1345,9 +1349,9 @@ WHERE
T3_0_.t3idref= 1
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY T3_0_ ref PRIMARY,FK_T3_T2Id PRIMARY 8 const 3 Using index; Start temporary
+1 PRIMARY T3_0_ ref PRIMARY,FK_T3_T2Id PRIMARY 8 const 3 Start temporary
1 PRIMARY T2_1_ eq_ref PRIMARY,FK_T2_T1Id PRIMARY 8 test.T3_0_.t2idref 1
-1 PRIMARY T1_1_ eq_ref PRIMARY PRIMARY 8 test.T2_1_.t1idref 1 Using index
+1 PRIMARY T1_1_ eq_ref PRIMARY PRIMARY 8 test.T2_1_.t1idref 1
1 PRIMARY T2_0_ ref FK_T2_T1Id FK_T2_T1Id 8 test.T2_1_.t1idref 1 Using index; End temporary
drop table t3,t2,t1;
set optimizer_search_depth=@tmp7474;
diff --git a/mysql-test/main/subselect_sj2.test b/mysql-test/main/subselect_sj2.test
index 5b9ec409c5d..4ccdcc50a38 100644
--- a/mysql-test/main/subselect_sj2.test
+++ b/mysql-test/main/subselect_sj2.test
@@ -2,13 +2,15 @@
# DuplicateElimination strategy test
#
+--source include/have_innodb.inc
+--source include/have_sequence.inc
+
set @innodb_stats_persistent_save= @@innodb_stats_persistent;
set @innodb_stats_persistent_sample_pages_save=
@@innodb_stats_persistent_sample_pages;
set global innodb_stats_persistent= 1;
set global innodb_stats_persistent_sample_pages=100;
---source include/have_innodb.inc
set @subselect_sj2_tmp= @@optimizer_switch;
set optimizer_switch='semijoin=on,firstmatch=on,loosescan=on';
@@ -48,7 +50,7 @@ create table t2 (
insert into t2 select a, a/2 from t0;
insert into t2 select a+10, a+10/2 from t0;
-select * from t1;
+insert into t1 values (1030,30),(1031,31),(1032,32),(1033,33);
select * from t2;
explain select * from t2 where b in (select a from t1);
select * from t2 where b in (select a from t1);
@@ -87,6 +89,7 @@ insert into t3 select
A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a
from t0 A, t0 B where B.a <5;
+analyze table t3 persistent for all;
--replace_column 9 #
explain select * from t3 where b in (select a from t0);
select * from t3 where b in (select A.a+B.a from t0 A, t0 B where B.a<5);
@@ -96,15 +99,15 @@ set max_heap_table_size= @save_max_heap_table_size;
# O2I join orders, with shortcutting:
explain select * from t1 where a in (select b from t2);
-select * from t1;
select * from t1 where a in (select b from t2);
drop table t1, t2, t3;
# (no need for anything in range/index_merge/DS-MRR)
-#
-# Test join buffering
-#
+--echo #
+--echo # Test join buffering
+--echo #
+
set @save_join_buffer_size = @@join_buffer_size;
set join_buffer_size= 8192;
@@ -255,6 +258,7 @@ INSERT INTO t3 VALUES
# Disable materialization to avoid races between query plans
set @bug35674_save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='materialization=off';
+
EXPLAIN
SELECT Name FROM t2
WHERE t2.Code IN (SELECT Country FROM t1 WHERE Population > 5000000)
@@ -1121,26 +1125,35 @@ INSERT INTO t1 VALUES
(16,'n',3),(17,'r',2),(18,'v',1),(19,'p',0);
CREATE TABLE t2 (
- pk INT, d VARCHAR(1), e INT,
+ pk INT, d VARCHAR(1), e INT, f int,
PRIMARY KEY(pk), KEY(d,e)
) ENGINE=InnoDB;
-INSERT INTO t2 VALUES
+INSERT INTO t2 (pk,d,e) VALUES
(1,'x',1),(2,'d',2),(3,'r',3),(4,'f',4),(5,'y',5),
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+update t2 set f=pk/2;
analyze table t1,t2;
+--echo # Original query, changed because of new optimizations
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
WHERE a = d AND ( pk < 2 OR d = 'z' )
);
+
+explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
- WHERE a = d AND ( pk < 2 OR d = 'z' )
+ WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
+);
+
+SELECT * FROM t1 WHERE b IN (
+ SELECT d FROM t2, t1
+ WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
);
DROP TABLE t1, t2;
@@ -1311,8 +1324,6 @@ SELECT * FROM t1 WHERE 9 IN ( SELECT b FROM t2 WHERE 1 IN ( SELECT MIN(c) FROM t
DROP TABLE t1,t2,t3;
---source include/have_innodb.inc
-
--disable_warnings
DROP TABLE IF EXISTS t1,t2,t3,t4;
--enable_warnings
diff --git a/mysql-test/main/subselect_sj2_jcl6.result b/mysql-test/main/subselect_sj2_jcl6.result
index 83abb68ca51..6ccec99ba5e 100644
--- a/mysql-test/main/subselect_sj2_jcl6.result
+++ b/mysql-test/main/subselect_sj2_jcl6.result
@@ -36,11 +36,7 @@ key(b)
);
insert into t2 select a, a/2 from t0;
insert into t2 select a+10, a+10/2 from t0;
-select * from t1;
-a b
-1 1
-1 1
-2 2
+insert into t1 values (1030,30),(1031,31),(1032,32),(1033,33);
select * from t2;
a b
0 0
@@ -65,9 +61,8 @@ a b
19 14
explain select * from t2 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL b NULL NULL NULL 20
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 3
+1 PRIMARY t2 ALL b NULL NULL NULL 20 Using where
+1 PRIMARY t1 hash_ALL NULL #hash#$hj 5 test.t2.b 7 Using where; FirstMatch(t2); Using join buffer (flat, BNLH join)
select * from t2 where b in (select a from t1);
a b
1 1
@@ -93,9 +88,8 @@ test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
explain select * from t3 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ALL b NULL NULL NULL 20
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 3
+1 PRIMARY t3 ALL b NULL NULL NULL 20 Using where
+1 PRIMARY t1 hash_ALL NULL #hash#$hj 5 test.t3.b 7 Using where; FirstMatch(t3); Using join buffer (flat, BNLH join)
select * from t3 where b in (select a from t1);
a b pk1 pk2 pk3
1 1 1 1 1
@@ -117,11 +111,14 @@ primary key(pk1, pk2)
insert into t3 select
A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a
from t0 A, t0 B where B.a <5;
+analyze table t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
explain select * from t3 where b in (select a from t0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ALL b NULL NULL NULL #
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func #
-2 MATERIALIZED t0 ALL NULL NULL NULL NULL #
+1 PRIMARY t0 ALL NULL NULL NULL NULL # Using where; Start temporary
+1 PRIMARY t3 ref b b 5 test.t0.a # End temporary; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
select * from t3 where b in (select A.a+B.a from t0 A, t0 B where B.a<5);
a b pk1 pk2
0 0 0 0
@@ -142,20 +139,17 @@ set join_buffer_size= @save_join_buffer_size;
set max_heap_table_size= @save_max_heap_table_size;
explain select * from t1 where a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 index b b 5 NULL 20 Using index
-select * from t1;
-a b
-1 1
-1 1
-2 2
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref b b 5 test.t1.a 1 Using index; FirstMatch(t1)
select * from t1 where a in (select b from t2);
a b
1 1
1 1
2 2
drop table t1, t2, t3;
+#
+# Test join buffering
+#
set @save_join_buffer_size = @@join_buffer_size;
set join_buffer_size= 8192;
create table t1 (a int, filler1 binary(200), filler2 binary(200));
@@ -171,9 +165,8 @@ explain select
a, mid(filler1, 1,10), length(filler1)=length(filler2) as Z
from t1 ot where a in (select a from t2 it);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY ot ALL NULL NULL NULL NULL 32
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED it ALL NULL NULL NULL NULL 22
+1 PRIMARY ot ALL NULL NULL NULL NULL 32 Using where
+1 PRIMARY it hash_ALL NULL #hash#$hj 5 test.ot.a 22 Using where; FirstMatch(ot); Using join buffer (flat, BNLH join)
select
a, mid(filler1, 1,10), length(filler1)=length(filler2) as Z
from t1 ot where a in (select a from t2 it);
@@ -197,16 +190,15 @@ a mid(filler1, 1,10) Z
16 filler1234 1
17 filler1234 1
18 filler1234 1
-19 filler1234 1
2 duplicate 1
18 duplicate 1
+19 filler1234 1
explain select
a, mid(filler1, 1,10), length(filler1)=length(filler2)
from t2 ot where a in (select a from t1 it);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY ot ALL NULL NULL NULL NULL 22
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED it ALL NULL NULL NULL NULL 32
+1 PRIMARY ot ALL NULL NULL NULL NULL 22 Using where
+1 PRIMARY it hash_ALL NULL #hash#$hj 5 test.ot.a 32 Using where; FirstMatch(ot); Using join buffer (flat, BNLH join)
select
a, mid(filler1, 1,10), length(filler1)=length(filler2)
from t2 ot where a in (select a from t1 it);
@@ -230,8 +222,8 @@ a mid(filler1, 1,10) length(filler1)=length(filler2)
16 filler1234 1
17 filler1234 1
18 filler1234 1
-19 filler1234 1
3 duplicate 1
+19 filler1234 1
19 duplicate 1
insert into t1 select a+20, 'filler123456', 'filler123456' from t0;
insert into t1 select a+20, 'filler123456', 'filler123456' from t0;
@@ -272,9 +264,8 @@ explain select
a, mid(filler1, 1,10), length(filler1)=length(filler2)
from t2 ot where a in (select a from t1 it);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY ot ALL NULL NULL NULL NULL 22
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED it ALL NULL NULL NULL NULL 52
+1 PRIMARY ot ALL NULL NULL NULL NULL 22 Using where
+1 PRIMARY it hash_ALL NULL #hash#$hj 5 test.ot.a 52 Using where; FirstMatch(ot); Using join buffer (flat, BNLH join)
select
a, mid(filler1, 1,10), length(filler1)=length(filler2)
from t2 ot where a in (select a from t1 it);
@@ -298,8 +289,8 @@ a mid(filler1, 1,10) length(filler1)=length(filler2)
16 filler1234 1
17 filler1234 1
18 filler1234 1
-19 filler1234 1
3 duplicate 1
+19 filler1234 1
19 duplicate 1
drop table t1, t2;
create table t1 (a int, b int, key(a));
@@ -641,7 +632,7 @@ select * from t1 left join t2 on (t2.a= t1.a and t2.a in (select pk from t3));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
1 PRIMARY t2 hash_ALL NULL #hash#$hj 5 test.t1.a 3 Using where; Using join buffer (flat, BNLH join)
-2 MATERIALIZED t3 index PRIMARY PRIMARY 4 NULL 10 Using index
+2 DEPENDENT SUBQUERY t3 unique_subquery PRIMARY PRIMARY 4 func 1 Using index
drop table t0, t1, t2, t3;
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -753,9 +744,8 @@ alter table t3 add primary key(id), add key(a);
The following must use loose index scan over t3, key a:
explain select count(a) from t2 where a in ( SELECT a FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index a a 5 NULL 1000 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t3 index a a 5 NULL 30000 Using index
+1 PRIMARY t2 index a a 5 NULL 1000 Using where; Using index
+1 PRIMARY t3 ref a a 5 test.t2.a 30 Using index; FirstMatch(t2)
select count(a) from t2 where a in ( SELECT a FROM t3);
count(a)
1000
@@ -783,8 +773,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 FirstMatch(t2); Using join buffer (incremental, BNL join)
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 Using where
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 1
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where; FirstMatch(t3); Using join buffer (incremental, BNL join)
select 1 from t2 where
c2 in (select 1 from t3, t2) and
c1 in (select convert(c6,char(1)) from t2);
@@ -852,9 +841,9 @@ explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 const PRIMARY PRIMARY 4 const # Using index
+1 PRIMARY alias1 const PRIMARY PRIMARY 4 const #
1 PRIMARY alias2 index f12 f12 7 NULL # Using index; LooseScan
-1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index; FirstMatch(alias2)
+1 PRIMARY t1 ALL NULL NULL NULL NULL # FirstMatch(alias2)
1 PRIMARY t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
@@ -924,9 +913,9 @@ SELECT * FROM t3 LEFT JOIN (v1,t2) ON t3.a = t2.a
WHERE t3.b IN (SELECT b FROM t4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
1 PRIMARY t2 hash_ALL NULL #hash#$hj 4 test.t3.a 1 Using where; Using join buffer (flat, BNLH join)
1 PRIMARY <derived3> ALL NULL NULL NULL NULL 2 Using join buffer (incremental, BNL join)
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t4 ALL NULL NULL NULL NULL 2
3 DERIVED t1 ALL NULL NULL NULL NULL 1
SELECT * FROM t3 LEFT JOIN (v1,t2) ON t3.a = t2.a
@@ -963,33 +952,43 @@ INSERT INTO t1 VALUES
(11,'z',8),(12,'c',7),(13,'a',6),(14,'q',5),(15,'y',4),
(16,'n',3),(17,'r',2),(18,'v',1),(19,'p',0);
CREATE TABLE t2 (
-pk INT, d VARCHAR(1), e INT,
+pk INT, d VARCHAR(1), e INT, f int,
PRIMARY KEY(pk), KEY(d,e)
) ENGINE=InnoDB;
-INSERT INTO t2 VALUES
+INSERT INTO t2 (pk,d,e) VALUES
(1,'x',1),(2,'d',2),(3,'r',3),(4,'f',4),(5,'y',5),
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+update t2 set f=pk/2;
analyze table t1,t2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+# Original query, changed because of new optimizations
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
WHERE a = d AND ( pk < 2 OR d = 'z' )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY t2 index PRIMARY,d d 9 NULL 17 Using where; Using index; LooseScan
+1 PRIMARY t1 ref a a 5 test.t2.d 1 Using where; Using index; FirstMatch(t2)
1 PRIMARY t1 ref b b 4 test.t2.d 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-2 MATERIALIZED t2 index_merge PRIMARY,d d,PRIMARY 4,4 NULL 2 Using sort_union(d,PRIMARY); Using where
-2 MATERIALIZED t1 ref a a 5 test.t2.d 1 Using where; Using index
+explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
-WHERE a = d AND ( pk < 2 OR d = 'z' )
+WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
+);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 index_merge PRIMARY,d d,PRIMARY 4,4 NULL 2 Using sort_union(d,PRIMARY); Using where; Start temporary
+1 PRIMARY t1 ref a a 5 test.t2.d 1 Using where; Using index
+1 PRIMARY t1 ref b b 4 test.t2.d 1 End temporary; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+SELECT * FROM t1 WHERE b IN (
+SELECT d FROM t2, t1
+WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
);
a b c
Warnings:
@@ -1237,10 +1236,10 @@ t1_pk1 t1_pk2 t3_i t3_c
explain
SELECT * FROM t1, t3 WHERE t3_c IN ( SELECT t1_pk2 FROM t4, t2 WHERE t2_c = t1_pk2 AND t2_i >= t3_i ) AND ( t1_pk1 = 'POL' );
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ref PRIMARY PRIMARY 5 const 1 Using where; Using index
-1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Start temporary; Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (incremental, BNL join)
-1 PRIMARY t4 hash_index NULL #hash#$hj:PRIMARY 54:59 test.t3.t3_c 2 Using where; Using index; End temporary; Using join buffer (incremental, BNLH join)
+1 PRIMARY t1 ref PRIMARY PRIMARY 5 const 1 Using where
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t3); Using join buffer (incremental, BNL join)
DROP TABLE t1,t2,t3,t4;
#
# MDEV-6263: Wrong result when using IN subquery with order by
@@ -1358,9 +1357,9 @@ WHERE
T3_0_.t3idref= 1
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY T3_0_ ref PRIMARY,FK_T3_T2Id PRIMARY 8 const 3 Using index; Start temporary
+1 PRIMARY T3_0_ ref PRIMARY,FK_T3_T2Id PRIMARY 8 const 3 Start temporary
1 PRIMARY T2_1_ eq_ref PRIMARY,FK_T2_T1Id PRIMARY 8 test.T3_0_.t2idref 1 Using join buffer (flat, BKA join); Key-ordered scan
-1 PRIMARY T1_1_ eq_ref PRIMARY PRIMARY 8 test.T2_1_.t1idref 1 Using index
+1 PRIMARY T1_1_ eq_ref PRIMARY PRIMARY 8 test.T2_1_.t1idref 1 Using join buffer (incremental, BKA join); Key-ordered scan
1 PRIMARY T2_0_ ref FK_T2_T1Id FK_T2_T1Id 8 test.T2_1_.t1idref 1 Using index; End temporary
drop table t3,t2,t1;
set optimizer_search_depth=@tmp7474;
@@ -1439,10 +1438,9 @@ SELECT t3.* FROM t1 JOIN t3 ON t3.b = t1.b
WHERE c IN (SELECT t4.b FROM t4 JOIN t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 1 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t3); Using join buffer (incremental, BNL join)
1 PRIMARY t1 ref b b 4 test.t3.b 1 Using index
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 1
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
SELECT t3.* FROM t1 JOIN t3 ON t3.b = t1.b
WHERE c IN (SELECT t4.b FROM t4 JOIN t2);
b c
@@ -1460,18 +1458,19 @@ INSERT INTO t2 VALUES (8);
CREATE TABLE t3 (pk int PRIMARY KEY, a int);
INSERT INTO t3 VALUES (1, 6), (2, 8);
CREATE TABLE t4 (b int) ENGINE=InnoDB;
-INSERT INTO t4 VALUES (2);
+INSERT INTO t4 VALUES (2),(88),(99);
+insert into t2 select seq from seq_100_to_200;
set @tmp_optimizer_switch=@@optimizer_switch;
SET optimizer_switch = 'semijoin_with_cache=on';
SET join_cache_level = 2;
EXPLAIN
SELECT * FROM t1, t2 WHERE b IN (SELECT a FROM t3, t4 WHERE b = pk);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 1 Using where
-2 MATERIALIZED t3 eq_ref PRIMARY PRIMARY 4 test.t4.b 1
+1 PRIMARY t1 ALL NULL NULL NULL NULL #
+1 PRIMARY t2 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func #
+2 MATERIALIZED t4 ALL NULL NULL NULL NULL # Using where
+2 MATERIALIZED t3 eq_ref PRIMARY PRIMARY 4 test.t4.b #
SELECT * FROM t1, t2 WHERE b IN (SELECT a FROM t3, t4 WHERE b = pk);
pk a b
1 6 8
@@ -1492,9 +1491,8 @@ SET join_cache_level = 3;
EXPLAIN
SELECT * FROM t1 WHERE b IN (SELECT a FROM t2 GROUP BY a);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 1
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1 Using where
+1 PRIMARY t2 hash_ALL NULL #hash#$hj 4 test.t1.b 1 Using where; FirstMatch(t1); Using join buffer (flat, BNLH join)
SELECT * FROM t1 WHERE b IN (SELECT a FROM t2 GROUP BY a);
a b
v v
diff --git a/mysql-test/main/subselect_sj2_jcl6.test b/mysql-test/main/subselect_sj2_jcl6.test
index a0c8a6c0f04..1001a213768 100644
--- a/mysql-test/main/subselect_sj2_jcl6.test
+++ b/mysql-test/main/subselect_sj2_jcl6.test
@@ -66,13 +66,16 @@ INSERT INTO t2 VALUES (8);
CREATE TABLE t3 (pk int PRIMARY KEY, a int);
INSERT INTO t3 VALUES (1, 6), (2, 8);
CREATE TABLE t4 (b int) ENGINE=InnoDB;
-INSERT INTO t4 VALUES (2);
+INSERT INTO t4 VALUES (2),(88),(99);
+
+insert into t2 select seq from seq_100_to_200;
set @tmp_optimizer_switch=@@optimizer_switch;
SET optimizer_switch = 'semijoin_with_cache=on';
SET join_cache_level = 2;
+--replace_column 9 #
EXPLAIN
SELECT * FROM t1, t2 WHERE b IN (SELECT a FROM t3, t4 WHERE b = pk);
SELECT * FROM t1, t2 WHERE b IN (SELECT a FROM t3, t4 WHERE b = pk);
diff --git a/mysql-test/main/subselect_sj2_mat.result b/mysql-test/main/subselect_sj2_mat.result
index 5d7e7d49da2..5bc7751eeaa 100644
--- a/mysql-test/main/subselect_sj2_mat.result
+++ b/mysql-test/main/subselect_sj2_mat.result
@@ -27,11 +27,7 @@ key(b)
);
insert into t2 select a, a/2 from t0;
insert into t2 select a+10, a+10/2 from t0;
-select * from t1;
-a b
-1 1
-1 1
-2 2
+insert into t1 values (1030,30),(1031,31),(1032,32),(1033,33);
select * from t2;
a b
0 0
@@ -58,7 +54,7 @@ explain select * from t2 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL b NULL NULL NULL 20
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 7
select * from t2 where b in (select a from t1);
a b
1 1
@@ -86,7 +82,7 @@ explain select * from t3 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL b NULL NULL NULL 20
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 7
select * from t3 where b in (select a from t1);
a b pk1 pk2 pk3
1 1 1 1 1
@@ -108,11 +104,14 @@ primary key(pk1, pk2)
insert into t3 select
A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a, A.a + 10*B.a
from t0 A, t0 B where B.a <5;
+analyze table t3 persistent for all;
+Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
explain select * from t3 where b in (select a from t0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ALL b NULL NULL NULL #
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func #
-2 MATERIALIZED t0 ALL NULL NULL NULL NULL #
+1 PRIMARY t0 ALL NULL NULL NULL NULL # Using where; Start temporary
+1 PRIMARY t3 ref b b 5 test.t0.a # End temporary
select * from t3 where b in (select A.a+B.a from t0 A, t0 B where B.a<5);
a b pk1 pk2
0 0 0 0
@@ -133,20 +132,17 @@ set join_buffer_size= @save_join_buffer_size;
set max_heap_table_size= @save_max_heap_table_size;
explain select * from t1 where a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 index b b 5 NULL 20 Using index
-select * from t1;
-a b
-1 1
-1 1
-2 2
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
+1 PRIMARY t2 ref b b 5 test.t1.a 1 Using index; FirstMatch(t1)
select * from t1 where a in (select b from t2);
a b
1 1
1 1
2 2
drop table t1, t2, t3;
+#
+# Test join buffering
+#
set @save_join_buffer_size = @@join_buffer_size;
set join_buffer_size= 8192;
create table t1 (a int, filler1 binary(200), filler2 binary(200));
@@ -630,7 +626,7 @@ select * from t1 left join t2 on (t2.a= t1.a and t2.a in (select pk from t3));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where
-2 MATERIALIZED t3 index PRIMARY PRIMARY 4 NULL 10 Using index
+2 DEPENDENT SUBQUERY t3 unique_subquery PRIMARY PRIMARY 4 func 1 Using index
drop table t0, t1, t2, t3;
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -742,9 +738,8 @@ alter table t3 add primary key(id), add key(a);
The following must use loose index scan over t3, key a:
explain select count(a) from t2 where a in ( SELECT a FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index a a 5 NULL 1000 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t3 index a a 5 NULL 30000 Using index
+1 PRIMARY t2 index a a 5 NULL 1000 Using where; Using index
+1 PRIMARY t3 ref a a 5 test.t2.a 30 Using index; FirstMatch(t2)
select count(a) from t2 where a in ( SELECT a FROM t3);
count(a)
1000
@@ -772,8 +767,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 1
1 PRIMARY t3 ALL NULL NULL NULL NULL 2 FirstMatch(t2)
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 Using where
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 1
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where; FirstMatch(t3)
select 1 from t2 where
c2 in (select 1 from t3, t2) and
c1 in (select convert(c6,char(1)) from t2);
@@ -841,9 +835,9 @@ explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 const PRIMARY PRIMARY 4 const # Using index
+1 PRIMARY alias1 const PRIMARY PRIMARY 4 const #
1 PRIMARY alias2 index f12 f12 7 NULL # Using index; LooseScan
-1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index; FirstMatch(alias2)
+1 PRIMARY t1 ALL NULL NULL NULL NULL # FirstMatch(alias2)
1 PRIMARY t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
@@ -913,9 +907,9 @@ SELECT * FROM t3 LEFT JOIN (v1,t2) ON t3.a = t2.a
WHERE t3.b IN (SELECT b FROM t4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
1 PRIMARY <derived3> ALL NULL NULL NULL NULL 2
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t4 ALL NULL NULL NULL NULL 2
3 DERIVED t1 ALL NULL NULL NULL NULL 1
SELECT * FROM t3 LEFT JOIN (v1,t2) ON t3.a = t2.a
@@ -952,33 +946,43 @@ INSERT INTO t1 VALUES
(11,'z',8),(12,'c',7),(13,'a',6),(14,'q',5),(15,'y',4),
(16,'n',3),(17,'r',2),(18,'v',1),(19,'p',0);
CREATE TABLE t2 (
-pk INT, d VARCHAR(1), e INT,
+pk INT, d VARCHAR(1), e INT, f int,
PRIMARY KEY(pk), KEY(d,e)
) ENGINE=InnoDB;
-INSERT INTO t2 VALUES
+INSERT INTO t2 (pk,d,e) VALUES
(1,'x',1),(2,'d',2),(3,'r',3),(4,'f',4),(5,'y',5),
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+update t2 set f=pk/2;
analyze table t1,t2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+# Original query, changed because of new optimizations
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
WHERE a = d AND ( pk < 2 OR d = 'z' )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY t2 index PRIMARY,d d 9 NULL 17 Using where; Using index; LooseScan
+1 PRIMARY t1 ref a a 5 test.t2.d 1 Using where; Using index; FirstMatch(t2)
1 PRIMARY t1 ref b b 4 test.t2.d 1
-2 MATERIALIZED t2 index_merge PRIMARY,d d,PRIMARY 4,4 NULL 2 Using sort_union(d,PRIMARY); Using where
-2 MATERIALIZED t1 ref a a 5 test.t2.d 1 Using where; Using index
+explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
-WHERE a = d AND ( pk < 2 OR d = 'z' )
+WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
+);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 index_merge PRIMARY,d d,PRIMARY 4,4 NULL 2 Using sort_union(d,PRIMARY); Using where; Start temporary
+1 PRIMARY t1 ref a a 5 test.t2.d 1 Using where; Using index
+1 PRIMARY t1 ref b b 4 test.t2.d 1 End temporary
+SELECT * FROM t1 WHERE b IN (
+SELECT d FROM t2, t1
+WHERE a = d AND ( pk < 2 OR d = 'z' ) and f > 0
);
a b c
Warnings:
@@ -1226,10 +1230,10 @@ t1_pk1 t1_pk2 t3_i t3_c
explain
SELECT * FROM t1, t3 WHERE t3_c IN ( SELECT t1_pk2 FROM t4, t2 WHERE t2_c = t1_pk2 AND t2_i >= t3_i ) AND ( t1_pk1 = 'POL' );
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ref PRIMARY PRIMARY 5 const 1 Using where; Using index
-1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Start temporary
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t4 index NULL PRIMARY 59 NULL 2 Using where; Using index; End temporary
+1 PRIMARY t1 ref PRIMARY PRIMARY 5 const 1 Using where
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t3)
DROP TABLE t1,t2,t3,t4;
#
# MDEV-6263: Wrong result when using IN subquery with order by
@@ -1347,9 +1351,9 @@ WHERE
T3_0_.t3idref= 1
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY T3_0_ ref PRIMARY,FK_T3_T2Id PRIMARY 8 const 3 Using index; Start temporary
+1 PRIMARY T3_0_ ref PRIMARY,FK_T3_T2Id PRIMARY 8 const 3 Start temporary
1 PRIMARY T2_1_ eq_ref PRIMARY,FK_T2_T1Id PRIMARY 8 test.T3_0_.t2idref 1
-1 PRIMARY T1_1_ eq_ref PRIMARY PRIMARY 8 test.T2_1_.t1idref 1 Using index
+1 PRIMARY T1_1_ eq_ref PRIMARY PRIMARY 8 test.T2_1_.t1idref 1
1 PRIMARY T2_0_ ref FK_T2_T1Id FK_T2_T1Id 8 test.T2_1_.t1idref 1 Using index; End temporary
drop table t3,t2,t1;
set optimizer_search_depth=@tmp7474;
@@ -1511,8 +1515,7 @@ t3.sack_id = 33479 AND t3.kit_id = 6;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 5 Using index
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.cat_id 1 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t4 index cat_id cat_id 4 NULL 19 Using index
+1 PRIMARY t4 ref cat_id cat_id 4 test.t3.cat_id 1 Using index; FirstMatch(t1)
SELECT count(*) FROM t1, t3
WHERE t1.cat_id = t3.cat_id AND
t3.cat_id IN (SELECT cat_id FROM t4) AND
@@ -1527,8 +1530,7 @@ t3.sack_id = 33479 AND t3.kit_id = 6;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 5 Using index
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.cat_id 1 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
-2 MATERIALIZED t2 index cat_id cat_id 4 NULL 19 Using index
+1 PRIMARY t2 ref cat_id cat_id 4 test.t3.cat_id 2 Using where; Using index; FirstMatch(t1)
SELECT count(*) FROM t1, t3
WHERE t1.cat_id = t3.cat_id AND
t3.cat_id IN (SELECT cat_id FROM t2) AND
@@ -1557,7 +1559,7 @@ WHERE ( b1, b1 ) IN ( SELECT a4, b4 FROM t3, t4);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary
1 PRIMARY t1 ref idx idx 2 test.t4.a4 1 100.00 Using index
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 16.67 End temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` semi join (`test`.`t3` join `test`.`t4`) left join `test`.`t2` on(`test`.`t2`.`a2` = `test`.`t4`.`a4`) where `test`.`t4`.`b4` = `test`.`t4`.`a4` and `test`.`t1`.`b1` = `test`.`t4`.`a4`
@@ -1575,7 +1577,7 @@ WHERE ( b1, b1 ) IN ( SELECT a4, b4 FROM t3, t4);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary
1 PRIMARY t1 ref idx idx 2 test.t4.a4 1 100.00 Using index
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 16.67 End temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` semi join (`test`.`t3` join `test`.`t4`) left join `test`.`t2` on(`test`.`t2`.`a2` = `test`.`t4`.`a4`) where `test`.`t4`.`b4` = `test`.`t4`.`a4` and `test`.`t1`.`b1` = `test`.`t4`.`a4`
@@ -1610,7 +1612,7 @@ WHERE c1 IN ( SELECT c4 FROM t3,t4 WHERE c3 = c4);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
1 PRIMARY t4 ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary; Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 10 100.00 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 10 5.00 Using where; End temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2` from `test`.`t1` semi join (`test`.`t3` join `test`.`t4`) left join `test`.`t2` on(`test`.`t2`.`c2` = `test`.`t1`.`c1` or `test`.`t1`.`c1` > 'z') where `test`.`t4`.`c4` = `test`.`t1`.`c1` and `test`.`t3`.`c3` = `test`.`t1`.`c1`
@@ -1714,11 +1716,10 @@ i
explain extended
select * from t1 where (rand() < 0) and i in (select i from t2);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 10 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 10 10.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1` semi join (`test`.`t2`) where rand() < 0
+Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`i` = `test`.`t1`.`i` and rand() < 0
drop table t1,t2;
set optimizer_switch=@save_optimizer_switch;
#
@@ -1727,7 +1728,13 @@ set optimizer_switch=@save_optimizer_switch;
CREATE TABLE t1 (f1 varchar(8), KEY(f1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('qux'),('foo');
CREATE TABLE t2 (f2 varchar(8)) ENGINE=InnoDB;
-INSERT INTO t2 VALUES ('bar'),('foo'),('qux');
+INSERT INTO t2 VALUES ('bar'),('foo'),('qux'),('qq1'),('qq2');
+analyze table t1,t2 persistent for all;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
SELECT f1 FROM t1
WHERE f1 IN ( SELECT f2 FROM t2 WHERE f2 > 'bar' )
HAVING f1 != 'foo'
@@ -1739,9 +1746,8 @@ WHERE f1 IN ( SELECT f2 FROM t2 WHERE f2 > 'bar' )
HAVING f1 != 'foo'
ORDER BY f1;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range f1 f1 11 NULL 2 Using where; Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 11 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t1 range f1 f1 11 NULL 2 Using where; Using index; Using temporary; Using filesort
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
DROP TABLE t1,t2;
#
# MDEV-16225: wrong resultset from query with semijoin=on
@@ -1779,7 +1785,7 @@ OR
(t.id IN (0,4,12,13,1,10,3,11))
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t index PRIMARY PRIMARY 4 NULL 114 Using where; Using index
+1 PRIMARY t ALL PRIMARY NULL NULL NULL 114 Using where
2 MATERIALIZED A ALL PRIMARY NULL NULL NULL 114
2 MATERIALIZED <subquery3> eq_ref distinct_key distinct_key 67 func 1
3 MATERIALIZED B range PRIMARY PRIMARY 4 NULL 8 Using where
@@ -1838,16 +1844,15 @@ explain
SELECT t2.id FROM t2,t1
WHERE t2.id IN (SELECT t3.ref_id FROM t3,t1 where t3.id = t1.id) and t2.id = t1.id;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 30 Using index
-1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.id 1 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
-2 MATERIALIZED t3 ALL NULL NULL NULL NULL 14
-2 MATERIALIZED t1 eq_ref PRIMARY PRIMARY 4 test.t3.id 1 Using index
+1 PRIMARY t3 ALL NULL NULL NULL NULL 14 Using where; Start temporary
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t3.ref_id 1 Using where; Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.id 1 Using index; End temporary
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.ref_id 1 Using where; Using index
SELECT t2.id FROM t2,t1
WHERE t2.id IN (SELECT t3.ref_id FROM t3,t1 where t3.id = t1.id) and t2.id = t1.id;
id
-10
11
+10
set optimizer_switch='materialization=off';
SELECT t2.id FROM t2,t1
WHERE t2.id IN (SELECT t3.ref_id FROM t3,t1 where t3.id = t1.id) and t2.id = t1.id;
@@ -1944,20 +1949,16 @@ AND t3.id_product IN (SELECT id_product FROM t2 t2_3 WHERE t2_3.id_t2 = 18 OR t2
AND t3.id_product IN (SELECT id_product FROM t2 t2_4 WHERE t2_4.id_t2 = 34 OR t2_4.id_t2 = 23)
AND t3.id_product IN (SELECT id_product FROM t2 t2_5 WHERE t2_5.id_t2 = 29 OR t2_5.id_t2 = 28 OR t2_5.id_t2 = 26);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 18 Using index
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY t5 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t2_2 ref id_t2,id_product id_t2 5 const 12 Using where; Start temporary
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t2_2.id_product 1 Using where; Using index; End temporary
1 PRIMARY t4 eq_ref PRIMARY PRIMARY 8 test.t3.id_product,const 1 Using where; Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 4 func 1 Using where
+1 PRIMARY t2_3 range id_t2,id_product id_t2 5 NULL 33 Using index condition; Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_4 range id_t2,id_product id_t2 5 NULL 18 Using index condition; Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_5 range id_t2,id_product id_t2 5 NULL 31 Using index condition; Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t5 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
1 PRIMARY t1 index NULL PRIMARY 8 NULL 73 Using where; Using index; Using join buffer (flat, BNL join)
-1 PRIMARY <subquery6> eq_ref distinct_key distinct_key 4 func 1 Using where
-3 MATERIALIZED t2_2 ref id_t2,id_product id_t2 5 const 12
2 MATERIALIZED t2_1 ALL id_t2,id_product NULL NULL NULL 223 Using where
-4 MATERIALIZED t2_3 range id_t2,id_product id_t2 5 NULL 33 Using index condition; Using where
-5 MATERIALIZED t2_4 range id_t2,id_product id_t2 5 NULL 18 Using index condition; Using where
-6 MATERIALIZED t2_5 range id_t2,id_product id_t2 5 NULL 31 Using index condition; Using where
set optimizer_switch='rowid_filter=default';
drop table t1,t2,t3,t4,t5;
set global innodb_stats_persistent= @innodb_stats_persistent_save;
diff --git a/mysql-test/main/subselect_sj2_mat.test b/mysql-test/main/subselect_sj2_mat.test
index 4b768652670..e38418f4798 100644
--- a/mysql-test/main/subselect_sj2_mat.test
+++ b/mysql-test/main/subselect_sj2_mat.test
@@ -300,7 +300,8 @@ set optimizer_switch=@save_optimizer_switch;
CREATE TABLE t1 (f1 varchar(8), KEY(f1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('qux'),('foo');
CREATE TABLE t2 (f2 varchar(8)) ENGINE=InnoDB;
-INSERT INTO t2 VALUES ('bar'),('foo'),('qux');
+INSERT INTO t2 VALUES ('bar'),('foo'),('qux'),('qq1'),('qq2');
+analyze table t1,t2 persistent for all;
let $q=
SELECT f1 FROM t1
diff --git a/mysql-test/main/subselect_sj_jcl6.result b/mysql-test/main/subselect_sj_jcl6.result
index 6efa3fc12b1..5971fa30e89 100644
--- a/mysql-test/main/subselect_sj_jcl6.result
+++ b/mysql-test/main/subselect_sj_jcl6.result
@@ -87,24 +87,24 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t10` join `test`.`t12` join `test`.`t1` where `test`.`t12`.`pk` = `test`.`t10`.`a` and `test`.`t10`.`pk` = `test`.`t1`.`a`
subqueries within outer joins go into ON expr.
-explAin extended
+explain extended
select * from t1 left join (t2 A, t2 B) on ( A.A= t1.A And B.A in (select pk from t10));
-id select_type tABle type possiBle_keys key key_len ref rows filtered ExtrA
+id select_type table type possible_keys key key_len ref rows filtered ExtrA
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY A ALL NULL NULL NULL NULL 3 100.00 Using where; Using join Buffer (flAt, BNL join)
1 PRIMARY B ALL NULL NULL NULL NULL 3 100.00 Using where; Using join Buffer (incrementAl, BNL join)
-2 MATERIALIZED t10 index PRIMARY PRIMARY 4 NULL 10 100.00 Using index
+2 DEPENDENT SUBQUERY t10 unique_suBquery PRIMARY PRIMARY 4 func 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`A`.`A` AS `A`,`test`.`A`.`B` AS `B`,`test`.`B`.`A` AS `A`,`test`.`B`.`B` AS `B` from `test`.`t1` left join (`test`.`t2` `A` join `test`.`t2` `B`) on(`test`.`A`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`B`.`A`,`test`.`B`.`A` in ( <mAteriAlize> (/* select#2 */ select `test`.`t10`.`pk` from `test`.`t10` ), <primAry_index_lookup>(`test`.`B`.`A` in <temporAry tABle> on distinct_key where `test`.`B`.`A` = `<suBquery2>`.`pk`)))) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`A`.`A` AS `A`,`test`.`A`.`B` AS `B`,`test`.`B`.`A` AS `A`,`test`.`B`.`B` AS `B` from `test`.`t1` left join (`test`.`t2` `A` join `test`.`t2` `B`) on(`test`.`A`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`B`.`A`,<exists>(<primAry_index_lookup>(<cAche>(`test`.`B`.`A`) in t10 on PRIMARY)))) where 1
t2 should be wrapped into OJ-nest, so we have "t1 LJ (t2 J t10)"
-explAin extended
+explain extended
select * from t1 left join t2 on (t2.A= t1.A And t2.A in (select pk from t10));
-id select_type tABle type possiBle_keys key key_len ref rows filtered ExtrA
+id select_type table type possible_keys key key_len ref rows filtered ExtrA
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY t2 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join Buffer (flAt, BNL join)
-2 MATERIALIZED t10 index PRIMARY PRIMARY 4 NULL 10 100.00 Using index
+2 DEPENDENT SUBQUERY t10 unique_suBquery PRIMARY PRIMARY 4 func 1 100.00 Using index
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`t2`.`A` AS `A`,`test`.`t2`.`B` AS `B` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`t1`.`A`,`test`.`t1`.`A` in ( <mAteriAlize> (/* select#2 */ select `test`.`t10`.`pk` from `test`.`t10` ), <primAry_index_lookup>(`test`.`t1`.`A` in <temporAry tABle> on distinct_key where `test`.`t1`.`A` = `<suBquery2>`.`pk`)))) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`A` AS `A`,`test`.`t1`.`B` AS `B`,`test`.`t2`.`A` AS `A`,`test`.`t2`.`B` AS `B` from `test`.`t1` left join `test`.`t2` on(`test`.`t2`.`A` = `test`.`t1`.`A` And <in_optimizer>(`test`.`t1`.`A`,<exists>(<primAry_index_lookup>(<cAche>(`test`.`t2`.`A`) in t10 on PRIMARY)))) where 1
set @save_join_buffer_size=@@join_buffer_size;
set join_buffer_size=8*1024;
we shouldn't flatten if we're going to get a join of > MAX_TABLES.
@@ -171,26 +171,26 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY s47 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
1 PRIMARY s48 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
1 PRIMARY s49 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m00 ALL NULL NULL NULL NULL 3 Using where
-2 DEPENDENT SUBQUERY m01 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY m02 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m03 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m04 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m05 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m06 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m07 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m08 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m09 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m10 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m11 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m12 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m13 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m14 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m15 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m16 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m17 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m18 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
-2 DEPENDENT SUBQUERY m19 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m00 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED m01 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+2 MATERIALIZED m02 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m03 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m04 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m05 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m06 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m07 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m08 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m09 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m10 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m11 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m12 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m13 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m14 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m15 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m16 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m17 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m18 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+2 MATERIALIZED m19 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
select * from
t1 left join t2 on (t2.a= t1.a and t2.a in (select pk from t10))
where t1.a < 5;
@@ -355,8 +355,8 @@ WHERE PNUM IN
(SELECT PNUM FROM PROJ));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY STAFF ALL NULL NULL NULL NULL 5
-1 PRIMARY PROJ ALL NULL NULL NULL NULL 6 Using join buffer (flat, BNL join)
-1 PRIMARY WORKS ALL NULL NULL NULL NULL 12 Using where; FirstMatch(STAFF); Using join buffer (incremental, BNL join)
+1 PRIMARY PROJ ALL NULL NULL NULL NULL 6 Start temporary; Using join buffer (flat, BNL join)
+1 PRIMARY WORKS ALL NULL NULL NULL NULL 12 Using where; End temporary; Using join buffer (incremental, BNL join)
SELECT EMPNUM, EMPNAME
FROM STAFF
WHERE EMPNUM IN
@@ -513,7 +513,7 @@ EXPLAIN EXTENDED SELECT vkey FROM t0 WHERE pk IN
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t0 ALL PRIMARY NULL NULL NULL 5 100.00
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t0.pk 1 100.00 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 PRIMARY t2 ref vkey vkey 4 test.t1.vnokey 2 100.00 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref vkey vkey 4 test.t1.vnokey 1 100.00 Using index; FirstMatch(t1)
Warnings:
Note 1003 select `test`.`t0`.`vkey` AS `vkey` from `test`.`t0` `t1` semi join (`test`.`t0` `t2`) join `test`.`t0` where `test`.`t1`.`pk` = `test`.`t0`.`pk` and `test`.`t2`.`vkey` = `test`.`t1`.`vnokey`
SELECT vkey FROM t0 WHERE pk IN
@@ -775,7 +775,7 @@ select a from t1
where a in (select c from t2 where d >= some(select e from t3 where b=e));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 7 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 6 100.00 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 6 16.67 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.b' of SELECT #3 was resolved in SELECT #1
@@ -813,20 +813,20 @@ PRIMARY KEY (pk)
INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo','ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')), (2,'f','ffff','ffff','ffff', 'ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'));
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii','iiii','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')), (2,'f','ffff','ffff','ffff','ffff','ffff','ffff','ffff','ffff','ffff',GeomFromText('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'));
+insert into t2 (pk) values (-1),(0);
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (a, b) IN (SELECT a, b FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 11 func,func 1 100.00
-2 MATERIALIZED t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`pk` > 0
+Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (a, b) IN (SELECT a, b FROM t2 WHERE pk > 0);
pk
2
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, c) IN (SELECT b, c FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`c` = `test`.`t1`.`c` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, c) IN (SELECT b, c FROM t2 WHERE pk > 0);
@@ -836,7 +836,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, d) IN (SELECT b, d FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`d` = `test`.`t1`.`d` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, d) IN (SELECT b, d FROM t2 WHERE pk > 0);
@@ -845,7 +845,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, e) IN (SELECT b, e FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`e` = `test`.`t1`.`e` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, e) IN (SELECT b, e FROM t2 WHERE pk > 0);
@@ -855,7 +855,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, f) IN (SELECT b, f FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`f` = `test`.`t1`.`f` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, f) IN (SELECT b, f FROM t2 WHERE pk > 0);
@@ -865,7 +865,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, g) IN (SELECT b, g FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`g` = `test`.`t1`.`g` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, g) IN (SELECT b, g FROM t2 WHERE pk > 0);
@@ -875,7 +875,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, h) IN (SELECT b, h FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`h` = `test`.`t1`.`h` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, h) IN (SELECT b, h FROM t2 WHERE pk > 0);
@@ -885,7 +885,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, i) IN (SELECT b, i FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`i` = `test`.`t1`.`i` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, i) IN (SELECT b, i FROM t2 WHERE pk > 0);
@@ -895,7 +895,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, j) IN (SELECT b, j FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`j` = `test`.`t1`.`j` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, j) IN (SELECT b, j FROM t2 WHERE pk > 0);
@@ -905,7 +905,7 @@ pk
EXPLAIN EXTENDED SELECT pk FROM t1 WHERE (b, k) IN (SELECT b, k FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 100.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 50.00 Using index condition; Using where; Rowid-ordered scan; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`b` and `test`.`t2`.`k` = `test`.`t1`.`k` and `test`.`t2`.`pk` > 0
SELECT pk FROM t1 WHERE (b, k) IN (SELECT b, k FROM t2 WHERE pk > 0);
@@ -985,10 +985,9 @@ FROM t1
WHERE `varchar_nokey` < 'n' XOR `pk` ) ;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 18 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
-2 MATERIALIZED t1 ALL varchar_key NULL NULL NULL 15 100.00 Using where
+1 PRIMARY t1 ALL varchar_key NULL NULL NULL 15 6.67 Using where; FirstMatch(t2); Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t2`.`varchar_nokey` AS `varchar_nokey` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`varchar_nokey` = `test`.`t1`.`varchar_key` and (`test`.`t1`.`varchar_key` < 'n' xor `test`.`t1`.`pk`)
+Note 1003 select `test`.`t2`.`varchar_nokey` AS `varchar_nokey` from `test`.`t2` semi join (`test`.`t1`) where `test`.`t1`.`varchar_key` = `test`.`t2`.`varchar_nokey` and `test`.`t1`.`varchar_nokey` = `test`.`t2`.`varchar_nokey` and (`test`.`t2`.`varchar_nokey` < 'n' xor `test`.`t1`.`pk`)
SELECT varchar_nokey
FROM t2
WHERE ( `varchar_nokey` , `varchar_nokey` ) IN (
@@ -1067,10 +1066,8 @@ AND t1.val IN (SELECT t3.val FROM t3
WHERE t3.val LIKE 'a%' OR t3.val LIKE 'e%');
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 13 func 1
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 13 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 6 Using where
-3 MATERIALIZED t3 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 6 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 5 Using where; FirstMatch(t2); Using join buffer (incremental, BNL join)
SELECT *
FROM t1
WHERE t1.val IN (SELECT t2.val FROM t2
@@ -1260,8 +1257,8 @@ INSERT INTO t2 VALUES (1, 0), (1, 1), (2, 0), (2, 1);
EXPLAIN
SELECT * FROM t1 WHERE (i) IN (SELECT i FROM t2 where j > 0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index k k 10 NULL 4 Using where; Using index; Start temporary
-1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ref k k 5 test.t1.i 1 Using where; Using index; Start temporary; End temporary
SELECT * FROM t1 WHERE (i) IN (SELECT i FROM t2 where j > 0);
i
1
@@ -1637,7 +1634,7 @@ drop table t1,t2;
# BUG#787299: Valgrind complains on a join query with two IN subqueries
#
create table t1 (a int);
-insert into t1 values (1), (2), (3);
+insert into t1 values (1), (2), (3),(1000),(2000);
create table t2 as select * from t1;
select * from t1 A, t1 B
where A.a = B.a and A.a in (select a from t2 C) and B.a in (select a from t2 D);
@@ -1645,16 +1642,16 @@ a a
1 1
2 2
3 3
+1000 1000
+2000 2000
explain
select * from t1 A, t1 B
where A.a = B.a and A.a in (select a from t2 C) and B.a in (select a from t2 D);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY A ALL NULL NULL NULL NULL 3
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-1 PRIMARY B ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED C ALL NULL NULL NULL NULL 3
-3 MATERIALIZED D ALL NULL NULL NULL NULL 3
+1 PRIMARY A ALL NULL NULL NULL NULL 5
+1 PRIMARY C ALL NULL NULL NULL NULL 5 Using where; FirstMatch(A); Using join buffer (flat, BNL join)
+1 PRIMARY B ALL NULL NULL NULL NULL 5 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY D ALL NULL NULL NULL NULL 5 Using where; FirstMatch(B); Using join buffer (incremental, BNL join)
drop table t1, t2;
#
# BUG#784441: Abort on semijoin with a view as the inner table
@@ -1991,7 +1988,7 @@ f1 f3 f4 f2 f4
DROP TABLE t1,t2,t3;
#
# BUG#803457: Wrong result with semijoin + view + outer join in maria-5.3-subqueries-mwl90
-# (Original testcase)
+# (Original, slightly modified testcase)
#
CREATE TABLE t1 (f1 int, f2 int );
INSERT INTO t1 VALUES (2,0),(4,0),(0,NULL);
@@ -2001,24 +1998,22 @@ CREATE TABLE t3 ( f1 int, f3 int );
INSERT INTO t3 VALUES (2,0),(4,0),(0,NULL),(4,0),(8,0);
CREATE TABLE t4 ( f2 int, KEY (f2) );
INSERT INTO t4 VALUES (0),(NULL);
-CREATE VIEW v4 AS SELECT DISTINCT f2 FROM t4 ;
+INSERT INTO t4 VALUES (0),(NULL),(-1),(-2),(-3);
# The following must not have outer joins:
explain extended
-SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4);
+SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4 where f2 = 0 or f2 IS NULL);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
+1 PRIMARY t4 ref_or_null f2 f2 5 const 4 25.00 Using where; Using index; FirstMatch(t2)
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 5 100.00 Using where; Using join buffer (incremental, BNL join)
-2 MATERIALIZED t4 index f2 f2 5 NULL 2 100.00 Using index
Warnings:
-Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f2` AS `f2`,`test`.`t2`.`f3` AS `f3`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` semi join (`test`.`t4`) join `test`.`t2` join `test`.`t3` where `test`.`t3`.`f1` = `test`.`t1`.`f1` and `test`.`t1`.`f2` = `test`.`t2`.`f2`
-SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4);
+Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t1`.`f2` AS `f2`,`test`.`t2`.`f3` AS `f3`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` semi join (`test`.`t4`) join `test`.`t2` join `test`.`t3` where `test`.`t4`.`f2` = `test`.`t2`.`f3` and `test`.`t3`.`f1` = `test`.`t1`.`f1` and `test`.`t1`.`f2` = `test`.`t2`.`f2` and (`test`.`t2`.`f3` = 0 or `test`.`t2`.`f3` is null)
+SELECT * FROM t1 NATURAL LEFT JOIN (t2, t3) WHERE t2.f3 IN (SELECT * FROM t4 where f2 = 0 or f2 IS NULL);
f1 f2 f3 f3
2 0 0 0
4 0 0 0
4 0 0 0
-drop view v4;
drop table t1, t2, t3, t4;
#
# BUG#803303: Wrong result with semijoin=on, outer join in maria-5.3-subqueries-mwl90
@@ -2164,9 +2159,9 @@ INSERT INTO t3 VALUES (6,5),(6,2),(8,0),(9,1),(6,5);
explain
SELECT * FROM t1, t2 WHERE (t2.a , t1.b) IN (SELECT a, b FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2
-1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 2 Using index; Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL b NULL NULL NULL 5 Using where; Start temporary; End temporary; Using join buffer (incremental, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+1 PRIMARY t3 ref b b 5 test.t1.b 1 Using where; Start temporary; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 test.t3.a 1 Using index; End temporary
SELECT * FROM t1, t2 WHERE (t2.a , t1.b) IN (SELECT a, b FROM t3);
b a
5 6
@@ -2189,10 +2184,10 @@ INSERT INTO t5 VALUES (7,0),(9,0);
explain
SELECT * FROM t3 WHERE t3.a IN (SELECT t5.a FROM t2, t4, t5 WHERE t2.c = t5.a AND t2.b = t5.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t5 index a a 10 NULL 2 Using where; Using index; LooseScan
-1 PRIMARY t2 ref b b 5 test.t5.b 2 Using where
-1 PRIMARY t4 ALL NULL NULL NULL NULL 3 FirstMatch(t5)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t5 index a a 10 NULL 2 Using where; Using index; Start temporary
+1 PRIMARY t2 ref b b 5 test.t5.b 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 15 Using where; End temporary; Using join buffer (incremental, BNL join)
SELECT * FROM t3 WHERE t3.a IN (SELECT t5.a FROM t2, t4, t5 WHERE t2.c = t5.a AND t2.b = t5.b);
a
0
@@ -2271,10 +2266,10 @@ alias1.c IN (SELECT SQ3_alias1.b
FROM t2 AS SQ3_alias1 STRAIGHT_JOIN t2 AS SQ3_alias2)
LIMIT 100;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 20
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join)
-1 PRIMARY t2 ALL NULL NULL NULL NULL 20 Using join buffer (incremental, BNL join)
-1 PRIMARY SQ3_alias1 ALL NULL NULL NULL NULL 20 Using where; Start temporary; Using join buffer (incremental, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL 20
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join)
+1 PRIMARY SQ3_alias1 ALL NULL NULL NULL NULL 20 Start temporary; Using join buffer (incremental, BNL join)
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 20 Using where; Using join buffer (incremental, BNL join)
1 PRIMARY SQ3_alias2 index NULL PRIMARY 4 NULL 20 Using index; End temporary; Using join buffer (incremental, BNL join)
2 DERIVED t2 ALL NULL NULL NULL NULL 20
create table t3 as
@@ -2447,9 +2442,9 @@ SET SESSION optimizer_switch='loosescan=off';
EXPLAIN
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index idx idx 9 NULL 2 Using where; Using index; Start temporary
-1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+1 PRIMARY t2 range idx idx 4 NULL 2 Using where; Using index
+1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
a
5
@@ -2457,9 +2452,9 @@ SET SESSION optimizer_switch='loosescan=on';
EXPLAIN
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index idx idx 9 NULL 2 Using where; Using index; Start temporary
-1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+1 PRIMARY t2 range idx idx 4 NULL 2 Using where; Using index
+1 PRIMARY t3 ref idx idx 4 test.t2.b 1 Using index; FirstMatch(t1)
SELECT * FROM t1 WHERE a IN (SELECT t2.a FROM t2,t3 WHERE t2.b = t3.b);
a
5
@@ -2510,10 +2505,9 @@ SELECT * FROM t1, t2
WHERE t1.a = t2.a AND t2.a IN (SELECT b FROM t3 STRAIGHT_JOIN t4);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 system NULL NULL NULL NULL 1
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 1
1 PRIMARY t1 ref a a 5 const 1 Using index
1 PRIMARY t2 ref a a 5 func 1 Using index
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 0
+1 PRIMARY t4 ALL NULL NULL NULL NULL 0 FirstMatch(t2); Using join buffer (flat, BNL join)
SELECT * FROM t1, t2
WHERE t1.a = t2.a AND t2.a IN (SELECT b FROM t3 STRAIGHT_JOIN t4);
a a
@@ -2573,7 +2567,7 @@ INSERT INTO t1 VALUES
(6,3),(7,1),(8,4),(9,3),(10,2);
CREATE TABLE t2 ( c INT, d INT, KEY(c) );
INSERT INTO t2 VALUES
-(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1);
+(1,2),(2,1),(3,3),(4,2),(5,5),(6,3),(7,1),(11,11);
analyze table t1,t2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
@@ -2583,18 +2577,20 @@ test.t2 analyze status OK
explain
SELECT a, b, d FROM t1, t2
WHERE ( b, d ) IN
-( SELECT b, d FROM t1, t2 WHERE b = c );
+( SELECT b, d FROM t1 as t3, t2 as t4 WHERE b = c );
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 7
-1 PRIMARY t1 index b b 5 NULL 10 Using where; Using index; LooseScan
-1 PRIMARY t2 ref c c 5 test.t1.b 1 Using where; FirstMatch(t1)
-1 PRIMARY t1 ref b b 5 test.t1.b 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY t3 index b b 5 NULL 10 Using where; Using index; Start temporary
+1 PRIMARY t4 ref c c 5 test.t3.b 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY t1 ALL b NULL NULL NULL 10 Using where; Using join buffer (incremental, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 8 Using where; End temporary; Using join buffer (incremental, BNL join)
SELECT a, b, d FROM t1, t2
WHERE ( b, d ) IN
-( SELECT b, d FROM t1, t2 WHERE b = c );
+( SELECT b, d FROM t1 as t3, t2 as t4 WHERE b = c );
a b d
1 2 1
1 2 1
+10 2 1
+10 2 1
2 1 2
2 1 2
3 3 3
@@ -2610,8 +2606,6 @@ a b d
8 4 2
9 3 3
9 3 3
-10 2 1
-10 2 1
DROP TABLE t1, t2;
# Another testcase for the above that still uses LooseScan:
create table t0(a int primary key);
@@ -2780,21 +2774,21 @@ WHERE (t1_1.a, t1_2.a) IN ( SELECT a, b FROM v1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_1 ALL NULL NULL NULL NULL 11 Using where
1 PRIMARY t1_2 ALL NULL NULL NULL NULL 11
-1 PRIMARY <derived3> ref key0 key0 5 test.t1_1.a 2 Using where; FirstMatch(t1_2)
+1 PRIMARY <derived3> ref key0 key0 5 test.t1_1.a 1 Using where; FirstMatch(t1_2)
3 DERIVED t1 ALL NULL NULL NULL NULL 11
SELECT * FROM t1 AS t1_1, t1 AS t1_2
WHERE (t1_1.a, t1_2.a) IN ( SELECT a, b FROM v1 );
a b a b
-3 1 9 1
-5 8 4 0
-3 9 9 1
2 4 4 0
2 4 6 8
2 6 4 0
2 6 6 8
+3 1 9 1
+3 9 9 1
5 4 4 0
-7 7 7 7
5 4 4 0
+5 8 4 0
+7 7 7 7
DROP VIEW v1;
DROP TABLE t1;
set @@join_cache_level= @tmp_jcl_978479;
@@ -2938,9 +2932,9 @@ alias2.col_int_key = alias1.col_int_key
WHERE alias1.pk = 58 OR alias1.col_varchar_key = 'o'
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 index_merge PRIMARY,col_int_key,col_varchar_key PRIMARY,col_varchar_key 4,4 NULL 2 Using sort_union(PRIMARY,col_varchar_key); Using where; Start temporary
-1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY alias2 ALL col_int_key NULL NULL NULL 12 Range checked for each record (index map: 0x2); End temporary
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2
+1 PRIMARY alias1 index_merge PRIMARY,col_int_key,col_varchar_key PRIMARY,col_varchar_key 4,4 NULL 2 Using sort_union(PRIMARY,col_varchar_key); Using where
+1 PRIMARY alias2 ALL col_int_key NULL NULL NULL 12 Range checked for each record (index map: 0x2); FirstMatch(t2)
SELECT *
FROM t2
WHERE (field1) IN (SELECT alias1.col_varchar_nokey AS field1
@@ -3048,7 +3042,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1 100.00 Using temporary; Using filesort
1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 5 100.00 Start temporary
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 PRIMARY t3 ALL NULL NULL NULL NULL 2 100.00 Using where; End temporary; Using join buffer (incremental, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 2 10.00 Using where; End temporary; Using join buffer (incremental, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1` AS `c1`,'x' AS `c2` from `test`.`t1` semi join (`test`.`t1` left join `test`.`t3` on(`test`.`t1`.`c1` = `test`.`t3`.`c3`)) where `test`.`t1`.`pk` = `test`.`t1`.`pk` order by 'x',`test`.`t1`.`c1`
DROP TABLE t1,t2,t3;
@@ -3300,8 +3294,7 @@ explain extended
SELECT Id FROM t1 WHERE Id in (SELECT t1_Id FROM t2 WHERE t2.col1 IS NULL);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ref col1 col1 5 const 2 100.00 Using index condition; Using where
+1 PRIMARY t2 ref col1 col1 5 const 2 50.00 Using index condition; Using where; FirstMatch(t1)
Warnings:
Note 1003 select 1 AS `Id` from (`test`.`t2`) where `test`.`t2`.`t1_Id` = 1 and `test`.`t2`.`col1` is null
DROP TABLE t1, t2;
@@ -3416,13 +3409,13 @@ WHERE t2.a IN (SELECT b FROM t3 WHERE t3.d <= t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL PRIMARY NULL NULL NULL 4 Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL d NULL NULL NULL 5 Range checked for each record (index map: 0x2); FirstMatch(t2)
+1 PRIMARY t3 hash_ALL d #hash#$hj 5 test.t2.a 5 Using where; FirstMatch(t2); Using join buffer (flat, BNLH join)
SELECT * FROM t1, t2
WHERE t2.a IN (SELECT b FROM t3 WHERE t3.d <= t1.a);
a a b
+w 5 19:11:10
w 2 18:56:33
q 2 18:56:33
-w 5 19:11:10
SET SESSION optimizer_switch='mrr=on';
SET SESSION join_cache_level=6;
EXPLAIN
@@ -3431,13 +3424,13 @@ WHERE t2.a IN (SELECT b FROM t3 WHERE t3.d <= t1.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
1 PRIMARY t2 ALL PRIMARY NULL NULL NULL 4 Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL d NULL NULL NULL 5 Range checked for each record (index map: 0x2); FirstMatch(t2)
+1 PRIMARY t3 hash_ALL d #hash#$hj 5 test.t2.a 5 Using where; FirstMatch(t2); Using join buffer (incremental, BNLH join)
SELECT * FROM t1, t2
WHERE t2.a IN (SELECT b FROM t3 WHERE t3.d <= t1.a);
a a b
+w 5 19:11:10
w 2 18:56:33
q 2 18:56:33
-w 5 19:11:10
set optimizer_switch=@save_optimizer_switch;
set join_cache_level=default;
DROP TABLE t1,t2,t3;
@@ -3482,20 +3475,21 @@ INSERT INTO t2 VALUES ('v'), ('v'), ('s'), ('j');
CREATE TABLE t3 (c varchar(1), d varchar(1), INDEX idx_c(c) );
INSERT INTO t3 VALUES ('v','v'), ('v','v'), ('s','s'), ('j','j');
INSERT INTO t3 VALUES ('m','m'), ('d','d'), ('k','k'), ('m','m');
+insert into t1 select 'z','z' from seq_1_to_20;
set @tmp_otimizer_switch= @@optimizer_switch;
set @tmp_join_cache_level=@@join_cache_level;
set optimizer_switch = 'materialization=on,semijoin=on,join_cache_hashed=on';
set join_cache_level=0;
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL idx_a NULL NULL NULL 3
+1 PRIMARY t1 ALL idx_a NULL NULL NULL 23
+1 PRIMARY t2 ref idx_c idx_c 4 test.t1.b 1 Using where; Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-1 PRIMARY t2 ref idx_c idx_c 4 test.t1.b 2 Using where; Using index
-2 MATERIALIZED t ALL idx_a NULL NULL NULL 3
+2 MATERIALIZED t ALL idx_a NULL NULL NULL 23
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
a b c
v v v
v v v
@@ -3503,14 +3497,14 @@ w w NULL
t t NULL
EXPLAIN
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL idx_a NULL NULL NULL 3
+1 PRIMARY t1 ALL idx_a NULL NULL NULL 23
+1 PRIMARY t3 ref idx_c idx_c 4 test.t1.b 1 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-1 PRIMARY t3 ref idx_c idx_c 4 test.t1.b 2 Using where
-2 MATERIALIZED t ALL idx_a NULL NULL NULL 3
+2 MATERIALIZED t ALL idx_a NULL NULL NULL 23
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
a b c d
v v v v
v v v v
@@ -3519,14 +3513,14 @@ t t NULL NULL
set join_cache_level=6;
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL idx_a NULL NULL NULL 3
+1 PRIMARY t1 ALL idx_a NULL NULL NULL 23
+1 PRIMARY t2 ref idx_c idx_c 4 test.t1.b 1 Using where; Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-1 PRIMARY t2 ref idx_c idx_c 4 test.t1.b 2 Using where; Using index
-2 MATERIALIZED t ALL idx_a NULL NULL NULL 3
+2 MATERIALIZED t ALL idx_a NULL NULL NULL 23
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
a b c
v v v
v v v
@@ -3534,14 +3528,14 @@ w w NULL
t t NULL
EXPLAIN
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL idx_a NULL NULL NULL 3
+1 PRIMARY t1 ALL idx_a NULL NULL NULL 23
+1 PRIMARY t3 ref idx_c idx_c 4 test.t1.b 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-1 PRIMARY t3 ref idx_c idx_c 4 test.t1.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-2 MATERIALIZED t ALL idx_a NULL NULL NULL 3
+2 MATERIALIZED t ALL idx_a NULL NULL NULL 23
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
-WHERE (a, b) IN (SELECT a, b FROM t1 t);
+WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
a b c d
v v v v
v v v v
@@ -3568,9 +3562,8 @@ SELECT a FROM t1 t WHERE a IN (SELECT b FROM t1, t2 WHERE b = a)
GROUP BY a HAVING a != 'z';
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t range idx_a idx_a 4 NULL 3 Using where; Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t1 ref idx_a idx_a 4 test.t2.b 2 Using index
+1 PRIMARY t1 ref idx_a idx_a 4 test.t.a 1 Using index
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t)
SELECT a FROM t1 t WHERE a IN (SELECT b FROM t1, t2 WHERE b = a)
GROUP BY a HAVING a != 'z';
a
@@ -3582,9 +3575,8 @@ SELECT a FROM t1 t WHERE a IN (SELECT b FROM t1, t2 WHERE b = a)
GROUP BY a HAVING a != 'z';
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t range idx_a idx_a 4 NULL 3 Using where; Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t1 ref idx_a idx_a 4 test.t2.b 2 Using index
+1 PRIMARY t1 ref idx_a idx_a 4 test.t.a 1 Using index
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t)
SELECT a FROM t1 t WHERE a IN (SELECT b FROM t1, t2 WHERE b = a)
GROUP BY a HAVING a != 'z';
a
diff --git a/mysql-test/main/subselect_sj_jcl6.test b/mysql-test/main/subselect_sj_jcl6.test
index f4f605c0406..e39a6887bde 100644
--- a/mysql-test/main/subselect_sj_jcl6.test
+++ b/mysql-test/main/subselect_sj_jcl6.test
@@ -3,6 +3,7 @@
#
--source include/no_valgrind_without_big.inc
--source include/default_optimizer_switch.inc
+--source include/have_sequence.inc
set @save_optimizer_switch_jcl6=@@optimizer_switch;
set @@optimizer_switch='optimize_join_buffer_size=on';
@@ -132,6 +133,8 @@ CREATE TABLE t3 (c varchar(1), d varchar(1), INDEX idx_c(c) );
INSERT INTO t3 VALUES ('v','v'), ('v','v'), ('s','s'), ('j','j');
INSERT INTO t3 VALUES ('m','m'), ('d','d'), ('k','k'), ('m','m');
+insert into t1 select 'z','z' from seq_1_to_20;
+
set @tmp_otimizer_switch= @@optimizer_switch;
set @tmp_join_cache_level=@@join_cache_level;
set optimizer_switch = 'materialization=on,semijoin=on,join_cache_hashed=on';
@@ -140,29 +143,29 @@ set join_cache_level=0;
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
EXPLAIN
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
set join_cache_level=6;
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
SELECT * FROM t1 LEFT JOIN t2 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
EXPLAIN
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
SELECT * FROM t1 LEFT JOIN t3 ON (c = b)
- WHERE (a, b) IN (SELECT a, b FROM t1 t);
+ WHERE (a, b) IN (SELECT a, b FROM t1 t) having t1.a !='z';
set optimizer_switch=@tmp_optimizer_switch;
set join_cache_level=@tmp_join_cache_level;
diff --git a/mysql-test/main/subselect_sj_mat.result b/mysql-test/main/subselect_sj_mat.result
index 61a7ff25569..d6582652729 100644
--- a/mysql-test/main/subselect_sj_mat.result
+++ b/mysql-test/main/subselect_sj_mat.result
@@ -107,11 +107,10 @@ a1 a2
explain extended
select * from t1i where a1 in (select b1 from t2i where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1i range _it1_idx _it1_idx # NULL 3 100.00 Using where;
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key # func 1 100.00
-2 MATERIALIZED t2i range it2i1,it2i3 it2i1 # NULL 5 100.00 Using where;
+1 PRIMARY t2i index it2i1,it2i3 it2i1 # NULL 5 50.00 Using where; Using index; LooseScan
+1 PRIMARY t1i ref _it1_idx _it1_idx # _ref_ 1 20.00
Warnings:
-Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) where `test`.`t2i`.`b1` > '0'
+Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t2i`.`b1` > '0'
select * from t1i where a1 in (select b1 from t2i where b1 > '0');
a1 a2
1 - 01 2 - 01
@@ -119,11 +118,11 @@ a1 a2
explain extended
select * from t1i where a1 in (select max(b1) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1i index it1i1,it1i3 # 18 # 3 100.00 #
-1 PRIMARY <subquery2> eq_ref distinct_key # 8 # 1 100.00 #
+1 PRIMARY <subquery2> ALL distinct_key # NULL # 5 100.00 #
+1 PRIMARY t1i ref it1i1,it1i3 # 9 # 1 100.00 #
2 MATERIALIZED t2i range it2i1,it2i3 # 9 # 5 100.00 #
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select max(`test`.`t2i`.`b1`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `<subquery2>`.`max(b1)` = `test`.`t1i`.`a1`
+Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select max(`test`.`t2i`.`b1`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `test`.`t1i`.`a1` = `<subquery2>`.`max(b1)`
select * from t1i where a1 in (select max(b1) from t2i where b1 > '0' group by b1);
a1 a2
1 - 01 2 - 01
@@ -131,11 +130,10 @@ a1 a2
explain extended
select * from t1i where (a1, a2) in (select b1, b2 from t2i where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1i range _it1_idx _it1_idx # NULL 3 100.00 Using where;
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key # func,func 1 100.00
-2 MATERIALIZED t2i range it2i1,it2i2,it2i3 it2i3 # NULL 5 100.00 Using where;
+1 PRIMARY t2i index it2i1,it2i2,it2i3 it2i3 # NULL 5 50.00 Using where; Using index; LooseScan
+1 PRIMARY t1i ref _it1_idx _it1_idx # _ref_ 1 20.00
Warnings:
-Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) where `test`.`t2i`.`b1` > '0'
+Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t1i`.`a2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b1` > '0'
select * from t1i where (a1, a2) in (select b1, b2 from t2i where b1 > '0');
a1 a2
1 - 01 2 - 01
@@ -143,11 +141,11 @@ a1 a2
explain extended
select * from t1i where (a1, a2) in (select b1, max(b2) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1i index it1i1,it1i2,it1i3 # # # 3 100.00 #
-1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
+1 PRIMARY <subquery2> ALL distinct_key # # # 5 100.00 #
+1 PRIMARY t1i ref it1i1,it1i2,it1i3 # # # 1 100.00 #
2 MATERIALIZED t2i range it2i1,it2i3 # # # 5 100.00 #
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,max(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `<subquery2>`.`b1` = `test`.`t1i`.`a1` and `<subquery2>`.`max(b2)` = `test`.`t1i`.`a2`
+Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,max(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `test`.`t1i`.`a1` = `<subquery2>`.`b1` and `test`.`t1i`.`a2` = `<subquery2>`.`max(b2)`
select * from t1i where (a1, a2) in (select b1, max(b2) from t2i where b1 > '0' group by b1);
a1 a2
1 - 01 2 - 01
@@ -155,11 +153,11 @@ a1 a2
explain extended
select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1i index it1i1,it1i2,it1i3 # # # 3 100.00 #
-1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
+1 PRIMARY <subquery2> ALL distinct_key # # # 5 100.00 #
+1 PRIMARY t1i ref it1i1,it1i2,it1i3 # # # 1 100.00 #
2 MATERIALIZED t2i range it2i1,it2i3 # # # 5 100.00 #
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,min(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `<subquery2>`.`b1` = `test`.`t1i`.`a1` and `<subquery2>`.`min(b2)` = `test`.`t1i`.`a2`
+Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,min(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `test`.`t1i`.`a1` = `<subquery2>`.`b1` and `test`.`t1i`.`a2` = `<subquery2>`.`min(b2)`
select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
a1 a2
1 - 01 2 - 01
@@ -279,7 +277,7 @@ explain extended
select * from t1i where (a1, a2) in (select b1, b2 from t2i order by b1, b2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2i index it2i1,it2i2,it2i3 it2i3 18 NULL 5 50.00 Using where; Using index; LooseScan
-1 PRIMARY t1i ref it1i1,it1i2,it1i3 it1i3 18 test.t2i.b1,test.t2i.b2 1 100.00 Using index
+1 PRIMARY t1i ref it1i1,it1i2,it1i3 it1i3 18 test.t2i.b1,test.t2i.b2 1 20.00 Using index
Warnings:
Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t1i`.`a2` = `test`.`t2i`.`b2`
select * from t1i where (a1, a2) in (select b1, b2 from t2i order by b1, b2);
@@ -331,14 +329,12 @@ where (a1, a2) in (select b1, b2 from t2 where b1 > '0') and
(a1, a2) in (select c1, c2 from t3
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
-3 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-3 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3.c1,test.t3.c2 1 100.00 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+1 PRIMARY t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t1.a1,test.t1.a2 1 100.00 Using index; Start temporary
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 4 15.00 Using where; End temporary; Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and `test`.`t2`.`b1` > '0' and `test`.`t3`.`c2` > '0'
+Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t1`.`a1` and `test`.`t2`.`b1` = `test`.`t1`.`a1` and `test`.`t3`.`c1` = `test`.`t1`.`a1` and `test`.`t2i`.`b2` = `test`.`t1`.`a2` and `test`.`t2`.`b2` = `test`.`t1`.`a2` and `test`.`t3`.`c2` = `test`.`t1`.`a2` and `test`.`t1`.`a1` > '0' and `test`.`t1`.`a2` > '0'
select * from t1
where (a1, a2) in (select b1, b2 from t2 where b1 > '0') and
(a1, a2) in (select c1, c2 from t3
@@ -352,14 +348,12 @@ where (a1, a2) in (select b1, b2 from t2i where b1 > '0') and
(a1, a2) in (select c1, c2 from t3i
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1i range it1i1,it1i2,it1i3 # # # 3 100.00 #
-1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
-1 PRIMARY <subquery3> eq_ref distinct_key # # # 1 100.00 #
-2 MATERIALIZED t2i range it2i1,it2i2,it2i3 # # # 5 100.00 #
-3 MATERIALIZED t3i range it3i1,it3i2,it3i3 # # # 4 100.00 #
-3 MATERIALIZED t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
+1 PRIMARY t2i index it2i1,it2i2,it2i3 # # # 5 50.00 #
+1 PRIMARY t1i ref it1i1,it1i2,it1i3 # # # 1 20.00 #
+1 PRIMARY t3i ref it3i1,it3i2,it3i3 # # # 1 100.00 #
+1 PRIMARY t2i ref it2i1,it2i2,it2i3 # # # 1 60.00 #
Warnings:
-Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) semi join (`test`.`t2i` join `test`.`t3i`) where `test`.`t2i`.`b1` = `test`.`t3i`.`c1` and `test`.`t2i`.`b2` = `test`.`t3i`.`c2` and `test`.`t2i`.`b1` > '0' and `test`.`t3i`.`c2` > '0'
+Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) semi join (`test`.`t2i` join `test`.`t3i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t3i`.`c1` = `test`.`t2i`.`b1` and `test`.`t2i`.`b1` = `test`.`t2i`.`b1` and `test`.`t1i`.`a2` = `test`.`t2i`.`b2` and `test`.`t3i`.`c2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b1` > '0' and `test`.`t2i`.`b2` > '0'
select * from t1i
where (a1, a2) in (select b1, b2 from t2i where b1 > '0') and
(a1, a2) in (select c1, c2 from t3i
@@ -375,16 +369,14 @@ b2 in (select c2 from t3 where c2 LIKE '%03')) and
(a1, a2) in (select c1, c2 from t3
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
-5 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-5 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3.c1,test.t3.c2 1 100.00 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+1 PRIMARY t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t1.a1,test.t1.a2 1 100.00 Using index; Start temporary
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 4 15.00 Using where; End temporary; Using join buffer (flat, BNL join)
4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and (<expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#3 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%02' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery3>`.`c2`)))) or <expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#4 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%03' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery4>`.`c2`))))) and `test`.`t3`.`c2` > '0'
+Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t1`.`a1` and `test`.`t2`.`b1` = `test`.`t1`.`a1` and `test`.`t3`.`c1` = `test`.`t1`.`a1` and `test`.`t2i`.`b2` = `test`.`t1`.`a2` and `test`.`t2`.`b2` = `test`.`t1`.`a2` and `test`.`t3`.`c2` = `test`.`t1`.`a2` and (<expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#3 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%02' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery3>`.`c2`)))) or <expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#4 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%03' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery4>`.`c2`))))) and `test`.`t1`.`a2` > '0'
select * from t1
where (a1, a2) in (select b1, b2 from t2
where b2 in (select c2 from t3 where c2 LIKE '%02') or
@@ -402,7 +394,7 @@ b2 in (select c2 from t3 t3b where c2 LIKE '%03')) and
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 16 func,func 1 100.00
5 MATERIALIZED t3c ALL NULL NULL NULL NULL 4 100.00 Using where
5 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3c.c1,test.t3c.c2 1 100.00 Using index
@@ -435,22 +427,18 @@ where (a1, a2) in (select b1, b2 from t2i where b1 > '0') and
where (c1, c2) in (select b1, b2 from t2i where b2 > '0')));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL # # # 3 100.00 #
-1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
-1 PRIMARY <subquery5> eq_ref distinct_key # # # 1 100.00 #
-2 MATERIALIZED t2 ALL NULL # # # 5 100.00 #
-5 MATERIALIZED t3 ALL NULL # # # 4 100.00 #
-5 MATERIALIZED t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
+1 PRIMARY t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
+1 PRIMARY t2 ALL NULL # # # 5 20.00 #
+1 PRIMARY t3 ALL NULL # # # 4 15.00 #
4 MATERIALIZED t3 ALL NULL # # # 4 100.00 #
3 MATERIALIZED t3 ALL NULL # # # 4 100.00 #
-7 UNION t1i range it1i1,it1i2,it1i3 # # # 3 100.00 #
-7 UNION <subquery8> eq_ref distinct_key # # # 1 100.00 #
-7 UNION <subquery9> eq_ref distinct_key # # # 1 100.00 #
-8 MATERIALIZED t2i range it2i1,it2i2,it2i3 # # # 5 100.00 #
-9 MATERIALIZED t3i range it3i1,it3i2,it3i3 # # # 4 100.00 #
-9 MATERIALIZED t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
+7 UNION t2i index it2i1,it2i2,it2i3 # # # 5 50.00 #
+7 UNION t1i ref it1i1,it1i2,it1i3 # # # 1 20.00 #
+7 UNION t3i ref it3i1,it3i2,it3i3 # # # 1 100.00 #
+7 UNION t2i ref it2i1,it2i2,it2i3 # # # 1 60.00 #
NULL UNION RESULT <union1,7> ALL NULL # # # NULL NULL #
Warnings:
-Note 1003 (/* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and (<expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#3 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%02' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery3>`.`c2`)))) or <expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#4 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%03' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery4>`.`c2`))))) and `test`.`t3`.`c2` > '0') union (/* select#7 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) semi join (`test`.`t2i` join `test`.`t3i`) where `test`.`t2i`.`b1` = `test`.`t3i`.`c1` and `test`.`t2i`.`b2` = `test`.`t3i`.`c2` and `test`.`t2i`.`b1` > '0' and `test`.`t3i`.`c2` > '0')
+Note 1003 (/* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t1`.`a1` and `test`.`t2`.`b1` = `test`.`t1`.`a1` and `test`.`t3`.`c1` = `test`.`t1`.`a1` and `test`.`t2i`.`b2` = `test`.`t1`.`a2` and `test`.`t2`.`b2` = `test`.`t1`.`a2` and `test`.`t3`.`c2` = `test`.`t1`.`a2` and (<expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#3 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%02' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery3>`.`c2`)))) or <expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#4 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%03' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery4>`.`c2`))))) and `test`.`t1`.`a2` > '0') union (/* select#7 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) semi join (`test`.`t2i` join `test`.`t3i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t3i`.`c1` = `test`.`t2i`.`b1` and `test`.`t2i`.`b1` = `test`.`t2i`.`b1` and `test`.`t1i`.`a2` = `test`.`t2i`.`b2` and `test`.`t3i`.`c2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b1` > '0' and `test`.`t2i`.`b2` > '0')
(select * from t1
where (a1, a2) in (select b1, b2 from t2
where b2 in (select c2 from t3 where c2 LIKE '%02') or
@@ -542,9 +530,9 @@ b2 in (select c2 from t3 t3b where c2 LIKE '%03')) and
where (c1, c2) in (select b1, b2 from t2i where b2 > '0' or b2 = a2));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t1.a1,test.t1.a2 1 100.00 Using index; Start temporary
-1 PRIMARY t3c ALL NULL NULL NULL NULL 4 100.00 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t3c ALL NULL NULL NULL NULL 4 15.00 Using where; End temporary; Using join buffer (flat, BNL join)
4 MATERIALIZED t3b ALL NULL NULL NULL NULL 4 100.00 Using where
3 DEPENDENT SUBQUERY t3a ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
@@ -658,7 +646,7 @@ from t1_16
where a1 in (select b1 from t2_16 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_16 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_16 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_16 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_16`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_16`.`a2`,7) AS `left(a2,7)` from `test`.`t1_16` semi join (`test`.`t2_16`) where `test`.`t2_16`.`b1` = `test`.`t1_16`.`a1` and `test`.`t1_16`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -672,7 +660,7 @@ from t1_16
where (a1,a2) in (select b1, b2 from t2_16 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_16 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_16 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_16 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_16`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_16`.`a2`,7) AS `left(a2,7)` from `test`.`t1_16` semi join (`test`.`t2_16`) where `test`.`t2_16`.`b1` = `test`.`t1_16`.`a1` and `test`.`t2_16`.`b2` = `test`.`t1_16`.`a2` and `test`.`t1_16`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -739,7 +727,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_16 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2_16 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 0.56 Using where; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t3` join `test`.`t2_16` join `test`.`t2` join `test`.`t1_16`) where `test`.`t2`.`b1` = `test`.`t3`.`c1` and `test`.`t2_16`.`b1` = `test`.`t1_16`.`a1` and `test`.`t2_16`.`b2` = `test`.`t1_16`.`a2` and `test`.`t2`.`b2` = substr(`test`.`t1_16`.`a2`,1,6) and `test`.`t3`.`c2` > '0' and concat(`test`.`t1`.`a1`,'x') = left(`test`.`t1_16`.`a1`,8)
drop table t1_16, t2_16, t3_16;
@@ -773,7 +761,7 @@ from t1_512
where a1 in (select b1 from t2_512 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_512 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_512 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_512 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_512`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_512`.`a2`,7) AS `left(a2,7)` from `test`.`t1_512` semi join (`test`.`t2_512`) where `test`.`t2_512`.`b1` = `test`.`t1_512`.`a1` and `test`.`t1_512`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -787,7 +775,7 @@ from t1_512
where (a1,a2) in (select b1, b2 from t2_512 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_512 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_512 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_512 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_512`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_512`.`a2`,7) AS `left(a2,7)` from `test`.`t1_512` semi join (`test`.`t2_512`) where `test`.`t2_512`.`b1` = `test`.`t1_512`.`a1` and `test`.`t2_512`.`b2` = `test`.`t1_512`.`a2` and `test`.`t1_512`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -877,7 +865,7 @@ from t1_1024
where a1 in (select b1 from t2_1024 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_1024 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_1024 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_1024 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_1024`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_1024`.`a2`,7) AS `left(a2,7)` from `test`.`t1_1024` semi join (`test`.`t2_1024`) where `test`.`t2_1024`.`b1` = `test`.`t1_1024`.`a1` and `test`.`t1_1024`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -891,7 +879,7 @@ from t1_1024
where (a1,a2) in (select b1, b2 from t2_1024 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_1024 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_1024 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_1024 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_1024`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_1024`.`a2`,7) AS `left(a2,7)` from `test`.`t1_1024` semi join (`test`.`t2_1024`) where `test`.`t2_1024`.`b1` = `test`.`t1_1024`.`a1` and `test`.`t2_1024`.`b2` = `test`.`t1_1024`.`a2` and `test`.`t1_1024`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -905,7 +893,7 @@ from t1_1024
where a1 in (select substring(b1,1,1024) from t2_1024 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_1024 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY t2_1024 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_1024 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_1024`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_1024`.`a2`,7) AS `left(a2,7)` from `test`.`t1_1024` semi join (`test`.`t2_1024`) where `test`.`t2_1024`.`b1` > '0' and `test`.`t1_1024`.`a1` = substr(`test`.`t2_1024`.`b1`,1,1024)
select left(a1,7), left(a2,7)
@@ -980,7 +968,7 @@ from t1_1025
where a1 in (select b1 from t2_1025 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_1025 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_1025 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_1025 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_1025`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_1025`.`a2`,7) AS `left(a2,7)` from `test`.`t1_1025` semi join (`test`.`t2_1025`) where `test`.`t2_1025`.`b1` = `test`.`t1_1025`.`a1` and `test`.`t1_1025`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -994,7 +982,7 @@ from t1_1025
where (a1,a2) in (select b1, b2 from t2_1025 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_1025 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2_1025 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_1025 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_1025`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_1025`.`a2`,7) AS `left(a2,7)` from `test`.`t1_1025` semi join (`test`.`t2_1025`) where `test`.`t2_1025`.`b1` = `test`.`t1_1025`.`a1` and `test`.`t2_1025`.`b2` = `test`.`t1_1025`.`a2` and `test`.`t1_1025`.`a1` > '0'
select left(a1,7), left(a2,7)
@@ -1008,7 +996,7 @@ from t1_1025
where a1 in (select substring(b1,1,1025) from t2_1025 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1_1025 ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY t2_1025 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2_1025 ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select left(`test`.`t1_1025`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1_1025`.`a2`,7) AS `left(a2,7)` from `test`.`t1_1025` semi join (`test`.`t2_1025`) where `test`.`t2_1025`.`b1` > '0' and `test`.`t1_1025`.`a1` = substr(`test`.`t2_1025`.`b1`,1,1025)
select left(a1,7), left(a2,7)
@@ -1090,7 +1078,7 @@ from t1bb
where (a1, a2) in (select b1, b2 from t2bb);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1bb ALL NULL NULL NULL NULL 3 100.00
-1 PRIMARY t2bb ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+1 PRIMARY t2bb ALL NULL NULL NULL NULL 3 33.33 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select conv(`test`.`t1bb`.`a1`,10,2) AS `bin(a1)`,`test`.`t1bb`.`a2` AS `a2` from `test`.`t1bb` semi join (`test`.`t2bb`) where `test`.`t2bb`.`b1` = `test`.`t1bb`.`a1` and `test`.`t2bb`.`b2` = `test`.`t1bb`.`a2`
select bin(a1), a2
@@ -1152,11 +1140,10 @@ create index it1a on t1(a);
explain extended
select a from t1 where a in (select c from t2 where d >= 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 index it1a it1a 4 NULL 7 100.00 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 6 100.00 Using where; Start temporary
+1 PRIMARY t1 ref it1a it1a 4 test.t2.c 1 16.67 Using index; End temporary
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`d` >= 20
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t1`.`a` = `test`.`t2`.`c` and `test`.`t2`.`d` >= 20
select a from t1 where a in (select c from t2 where d >= 20);
a
2
@@ -1167,11 +1154,10 @@ insert into t2 values (1,10);
explain extended
select a from t1 where a in (select c from t2 where d >= 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 index it1a it1a 4 NULL 7 100.00 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 7 100.00 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 7 100.00 Using where; Start temporary
+1 PRIMARY t1 ref it1a it1a 4 test.t2.c 1 14.29 Using index; End temporary
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`d` >= 20
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t1`.`a` = `test`.`t2`.`c` and `test`.`t2`.`d` >= 20
select a from t1 where a in (select c from t2 where d >= 20);
a
2
@@ -1181,7 +1167,7 @@ a
explain extended
select a from t1 group by a having a in (select c from t2 where d >= 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 range NULL it1a 4 NULL 8 100.00 Using index for group-by
+1 PRIMARY t1 range NULL it1a 4 NULL 7 100.00 Using index for group-by
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 7 100.00 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` group by `test`.`t1`.`a` having <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`c` from `test`.`t2` where `test`.`t2`.`d` >= 20 ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`c`))))
@@ -1193,7 +1179,7 @@ create index iab on t1(a, b);
explain extended
select a from t1 group by a having a in (select c from t2 where d >= 20);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 range NULL it1a 4 NULL 8 100.00 Using index for group-by
+1 PRIMARY t1 range NULL it1a 4 NULL 7 100.00 Using index for group-by
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 7 100.00 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` group by `test`.`t1`.`a` having <expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `test`.`t2`.`c` from `test`.`t2` where `test`.`t2`.`d` >= 20 ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`c`))))
@@ -1205,7 +1191,7 @@ explain extended
select a from t1 group by a
having a in (select c from t2 where d >= some(select e from t3 where max(b)=e));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 range NULL iab 4 NULL 8 100.00 Using index for group-by
+1 PRIMARY t1 range NULL iab 4 NULL 7 100.00 Using index for group-by
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 7 100.00 Using where
3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
@@ -1222,7 +1208,7 @@ select a from t1
where a in (select c from t2 where d >= some(select e from t3 where b=e));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 7 100.00 Start temporary
-1 PRIMARY t1 ref it1a,iab iab 4 test.t2.c 1 100.00 Using where; Using index; End temporary
+1 PRIMARY t1 ref it1a,iab iab 4 test.t2.c 1 9.41 Using where; Using index; End temporary
3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
Note 1276 Field or reference 'test.t1.b' of SELECT #3 was resolved in SELECT #1
@@ -1549,13 +1535,15 @@ SET @@optimizer_switch='semijoin=on,materialization=on';
EXPLAIN SELECT COUNT(*) FROM t1 WHERE (f1,f2) IN (SELECT f1,f2 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 7 func,func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2
+1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT COUNT(*) FROM t1 WHERE (f1,f2) IN (SELECT f1,f2 FROM t2);
COUNT(*)
2
set @@optimizer_switch= @local_optimizer_switch;
DROP TABLE t1, t2;
+#
+# BUG#46548 IN-subqueries return 0 rows with materialization=on
+#
CREATE TABLE t1 (
pk int,
a varchar(1),
@@ -1565,16 +1553,19 @@ d varchar(4),
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo'),(2,'f','ffff','ffff','ffff');
+insert into t1 select seq,'x','xxxx','xxxx','xxxx' from seq_10_to_40;
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii'),(2,'f','ffff','ffff','ffff');
+insert into t2 select -seq,'a','aaaa','aaaa','aaaa' from seq_1_to_20;
+insert into t2 select seq,'b','bbbb','bbbb','bbbb' from seq_100_to_200;
set @local_optimizer_switch=@@optimizer_switch;
set @@optimizer_switch=@optimizer_switch_local_default;
SET @@optimizer_switch='semijoin=on,materialization=on';
EXPLAIN SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 2
+1 PRIMARY t1 ALL NULL NULL NULL NULL 33
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using where; Rowid-ordered scan
+2 MATERIALIZED t2 ALL PRIMARY NULL NULL NULL 123 Using where
SELECT pk FROM t1 WHERE (a) IN (SELECT a FROM t2 WHERE pk > 0);
pk
2
@@ -1893,7 +1884,7 @@ SELECT * FROM t1
WHERE a IN ( SELECT MIN(a) FROM t1 );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 system a NULL NULL NULL 1 100.00
-1 PRIMARY <subquery2> system NULL NULL NULL NULL 1 100.00
+1 PRIMARY <subquery2> system NULL NULL NULL NULL 0 0.00
2 MATERIALIZED NULL NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
Warnings:
Note 1003 /* select#1 */ select 8 AS `a` from dual where 1
@@ -1935,12 +1926,13 @@ DROP TABLE t1,t2;
#
create table t1 (a int, b int);
insert into t1 values (7,5), (3,3), (5,4), (9,3);
+insert into t1 select seq,seq from seq_100_to_200;
create table t2 (a int, b int, index i_a(a));
insert into t2 values
(4,2), (7,9), (7,4), (3,1), (5,3), (3,1), (9,4), (8,1);
explain select * from t1 where t1.a in (select a from t2 where t2.a=7 or t2.b<=1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
+1 PRIMARY t1 ALL NULL NULL NULL NULL 105
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t2 ALL i_a NULL NULL NULL 8 Using where
select * from t1 where t1.a in (select a from t2 where t2.a=7 or t2.b<=1);
@@ -2031,10 +2023,9 @@ WHERE (a, c) IN (SELECT s1.b, s1.c FROM t2 AS s1, t2 AS s2
WHERE s2.d = s1.e AND s1.e = (SELECT MAX(e) FROM t2));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
-1 PRIMARY t2 index c c 5 NULL 8 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-2 MATERIALIZED s2 ref d d 4 const 2 Using where; Using index
-2 MATERIALIZED s1 ALL c NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t2 index c c 5 NULL 8 Using where; Using index
+1 PRIMARY s1 ref c c 5 test.t2.c 1 Using where
+1 PRIMARY s2 ref d d 4 const 2 Using where; Using index; FirstMatch(t2)
3 SUBQUERY t2 ALL NULL NULL NULL NULL 8
SELECT a, c FROM t1, t2
WHERE (a, c) IN (SELECT s1.b, s1.c FROM t2 AS s1, t2 AS s2
@@ -2051,10 +2042,9 @@ WHERE (a, c) IN (SELECT s1.b, s1.c FROM t2 AS s1, t2 AS s2
WHERE s2.d = s1.e AND s1.e = (SELECT MAX(e) FROM t2));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
-1 PRIMARY t2 index c c 5 NULL 8 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1
-2 MATERIALIZED s2 ref d d 4 const 2 Using where; Using index
-2 MATERIALIZED s1 hash_ALL c #hash#$hj 5 const 8 Using where; Using join buffer (flat, BNLH join)
+1 PRIMARY t2 index c c 5 NULL 8 Using where; Using index
+1 PRIMARY s1 hash_ALL c #hash#c 5 test.t2.c 8 Using where; Using join buffer (flat, BNLH join)
+1 PRIMARY s2 hash_range d #hash#d:d 4:4 const 2 Using where; Using index; FirstMatch(t2); Using join buffer (incremental, BNLH join)
3 SUBQUERY t2 ALL NULL NULL NULL NULL 8
SELECT a, c FROM t1, t2
WHERE (a, c) IN (SELECT s1.b, s1.c FROM t2 AS s1, t2 AS s2
@@ -2236,9 +2226,8 @@ mysqltest1
EXPLAIN EXTENDED
SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort
-1 PRIMARY t1 eq_ref db db 764 information_schema.schemata.SCHEMA_NAME 1 100.00 Using where; Using index
-2 MATERIALIZED schemata ALL NULL NULL NULL NULL NULL NULL
+1 PRIMARY t1 index db db 764 NULL 4 100.00 Using index; Using temporary; Using filesort
+1 PRIMARY schemata ALL NULL NULL NULL NULL NULL NULL Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`db` AS `db` from `test`.`t1` semi join (`information_schema`.`schemata`) where `test`.`t1`.`db` = `information_schema`.`schemata`.`SCHEMA_NAME` order by `test`.`t1`.`db` desc
drop table t1;
@@ -2270,8 +2259,10 @@ drop table t1;
CREATE TABLE t1 (
pk INT, f1 INT NOT NULL, f2 VARCHAR(3), f3 INT NULL, PRIMARY KEY(pk)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,1,'foo',8), (2,5,'bar',7);
+create table t2 like t1;
+insert into t2 select * from t1;
SELECT sq1.f2 FROM t1 AS sq1
-WHERE EXISTS ( SELECT * FROM t1 AS sq2
+WHERE EXISTS ( SELECT * FROM t2 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
f2
foo
@@ -2283,18 +2274,17 @@ WHERE EXISTS ( SELECT * FROM t1 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY sq1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where; FirstMatch
2 DEPENDENT SUBQUERY sq2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-3 MATERIALIZED t1 ALL NULL NULL NULL NULL 2
# this checks the result set above
set optimizer_switch= 'materialization=off,semijoin=off';
SELECT sq1.f2 FROM t1 AS sq1
-WHERE EXISTS ( SELECT * FROM t1 AS sq2
+WHERE EXISTS ( SELECT * FROM t2 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
f2
foo
set optimizer_switch= @local_optimizer_switch;
-DROP TABLE t1;
+DROP TABLE t1,t2;
#
# MDEV-12145: IN subquery used in WHERE of EXISTS subquery
#
@@ -2317,10 +2307,9 @@ WHERE EXISTS ( SELECT * FROM t2, t3
WHERE i3 = i2 AND f1 IN ( SELECT f3 FROM t3 ) );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-2 DEPENDENT SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
+2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 8 12.50 Using where; FirstMatch
2 DEPENDENT SUBQUERY t2 range i2 i2 5 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY t3 ref i3 i3 5 test.t2.i2 2 100.00 Using index
-3 MATERIALIZED t3 ALL NULL NULL NULL NULL 8 100.00
+2 DEPENDENT SUBQUERY t3 ref i3 i3 5 test.t2.i2 1 100.00 Using index
Warnings:
Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`f1` AS `f1` from `test`.`t1` where <expr_cache><`test`.`t1`.`f1`>(exists(/* select#2 */ select 1 from `test`.`t2` semi join (`test`.`t3`) join `test`.`t3` where `test`.`t3`.`i3` = `test`.`t2`.`i2` and `test`.`t1`.`f1` = `test`.`t3`.`f3` limit 1))
@@ -2356,9 +2345,8 @@ SELECT pk, f1, ( SELECT COUNT(*) FROM t2
WHERE t1.pk IN ( SELECT f2 FROM t2 ) ) AS sq FROM t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00
-2 DEPENDENT SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
+2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 20.00 Using where; FirstMatch
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using join buffer (flat, BNL join)
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00
Warnings:
Note 1276 Field or reference 'test.t1.pk' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`f1` AS `f1`,<expr_cache><`test`.`t1`.`pk`>((/* select#2 */ select count(0) from `test`.`t2` semi join (`test`.`t2`) where `test`.`t1`.`pk` = `test`.`t2`.`f2`)) AS `sq` from `test`.`t1`
@@ -2441,11 +2429,10 @@ WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
t2.user = '86826bf03710200044e0bfc8bcbe5d79');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t2.ugroup 2 Using where
+1 PRIMARY t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where; Start temporary
+1 PRIMARY t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
+1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t3_i.sys_id 2 Using index condition; Using where; End temporary
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
-2 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where
-2 MATERIALIZED t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
set statement optimizer_prune_level=1 for explain SELECT t1.assignment_group
FROM t1, t3
WHERE t1.assignment_group = t3.sys_id AND
@@ -2456,11 +2443,10 @@ WHERE t2.ugroup = t3_i.sys_id AND
t3_i.type LIKE '59e22fb137032000158bbfc8bcbe5d52' AND
t2.user = '86826bf03710200044e0bfc8bcbe5d79');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t2.ugroup 2 Using where
+1 PRIMARY t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where; Start temporary
+1 PRIMARY t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
+1 PRIMARY t1 ref idx1,idx2 idx1 35 test.t3_i.sys_id 2 Using index condition; Using where; End temporary
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 32 test.t1.assignment_group 1 Using where; Using index
-3 MATERIALIZED t2 ref idx3,idx4 idx4 35 const 2 Using index condition; Using where
-3 MATERIALIZED t3_i eq_ref PRIMARY PRIMARY 32 test.t2.ugroup 1 Using index condition; Using where
SELECT t1.assignment_group
FROM t1, t3
WHERE t1.assignment_group = t3.sys_id AND
@@ -2492,8 +2478,7 @@ explain
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 9
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t2 hash_ALL NULL #hash#$hj 8 test.t1.id,test.t1.id 3 Using where; FirstMatch(t1); Using join buffer (flat, BNLH join)
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
1
1
@@ -2505,8 +2490,7 @@ explain
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index id id 4 NULL 9 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
1
1
@@ -2555,20 +2539,17 @@ drop procedure prepare_data;
set @@optimizer_switch= @local_optimizer_switch;
drop table t1,t2,t3;
CREATE TABLE t1 ( id int NOT NULL, key(id));
-INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19);
+INSERT INTO t1 select seq from seq_11_to_39;
CREATE TABLE t2 (i1 int NOT NULL, i2 int NOT NULL);
-INSERT INTO t2 VALUES (11,11),(12,12),(13,13);
+INSERT INTO t2 select seq,seq+1 from seq_11_to_50;
CREATE VIEW v1 AS SELECT t2.i1 FROM t2 where t2.i1 = t2.i2;
explain SELECT 1 FROM t1 where t1.id IN (SELECT v1.i1 from v1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index id id 4 NULL 9 Using index
+1 PRIMARY t1 index id id 4 NULL 29 Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 40 Using where
SELECT 1 FROM t1 where t1.id IN (SELECT v1.i1 from v1);
1
-1
-1
-1
drop table t1,t2;
drop view v1;
#
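(Sketch, not part of the committed patch: the result hunks above replace the old MATERIALIZED `<subqueryN>` plan lines with FirstMatch / Start temporary...End temporary strategies, which the new cost model now prefers on these small tables. Assuming tables shaped like the t1/t2 created earlier in subselect_sj_mat.test, either plan shape can still be pinned explicitly with optimizer_switch — the same mechanism the test already uses — instead of relying on cost estimates.)
# hypothetical sketch; @sketch_switch is an illustrative name
set @sketch_switch= @@optimizer_switch;
set optimizer_switch= 'materialization=on,firstmatch=off';   # steers toward the old MATERIALIZED plan
explain SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
set optimizer_switch= 'materialization=off,firstmatch=on';   # steers toward the new FirstMatch plan
explain SELECT 1 FROM t1 where t1.id IN (SELECT t2.i1 FROM t2 WHERE t2.i1 = t2.i2);
set optimizer_switch= @sketch_switch;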
diff --git a/mysql-test/main/subselect_sj_mat.test b/mysql-test/main/subselect_sj_mat.test
index 6a9c78adc52..4302fc10c81 100644
--- a/mysql-test/main/subselect_sj_mat.test
+++ b/mysql-test/main/subselect_sj_mat.test
@@ -4,6 +4,7 @@
#
--source include/default_optimizer_switch.inc
+--source include/have_sequence.inc
set optimizer_switch=ifnull(@subselect_mat_test_optimizer_switch_value, 'semijoin=on,firstmatch=on,loosescan=on,semijoin_with_cache=on');
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
@@ -1174,9 +1175,9 @@ set @@optimizer_switch= @local_optimizer_switch;
DROP TABLE t1, t2;
-#
-# BUG#46548 IN-subqueries return 0 rows with materialization=on
-#
+--echo #
+--echo # BUG#46548 IN-subqueries return 0 rows with materialization=on
+--echo #
CREATE TABLE t1 (
pk int,
a varchar(1),
@@ -1186,9 +1187,12 @@ CREATE TABLE t1 (
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,'o','ffff','ffff','ffoo'),(2,'f','ffff','ffff','ffff');
+insert into t1 select seq,'x','xxxx','xxxx','xxxx' from seq_10_to_40;
CREATE TABLE t2 LIKE t1;
INSERT INTO t2 VALUES (1,'i','iiii','iiii','iiii'),(2,'f','ffff','ffff','ffff');
+insert into t2 select -seq,'a','aaaa','aaaa','aaaa' from seq_1_to_20;
+insert into t2 select seq,'b','bbbb','bbbb','bbbb' from seq_100_to_200;
set @local_optimizer_switch=@@optimizer_switch;
set @@optimizer_switch=@optimizer_switch_local_default;
@@ -1553,6 +1557,7 @@ DROP TABLE t1,t2;
--echo #
create table t1 (a int, b int);
insert into t1 values (7,5), (3,3), (5,4), (9,3);
+insert into t1 select seq,seq from seq_100_to_200;
create table t2 (a int, b int, index i_a(a));
@@ -1883,9 +1888,11 @@ drop table t1;
CREATE TABLE t1 (
pk INT, f1 INT NOT NULL, f2 VARCHAR(3), f3 INT NULL, PRIMARY KEY(pk)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,1,'foo',8), (2,5,'bar',7);
+create table t2 like t1;
+insert into t2 select * from t1;
SELECT sq1.f2 FROM t1 AS sq1
- WHERE EXISTS ( SELECT * FROM t1 AS sq2
+ WHERE EXISTS ( SELECT * FROM t2 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
set @local_optimizer_switch= @@optimizer_switch;
@@ -1899,12 +1906,12 @@ SELECT sq1.f2 FROM t1 AS sq1
--echo # this checks the result set above
set optimizer_switch= 'materialization=off,semijoin=off';
SELECT sq1.f2 FROM t1 AS sq1
- WHERE EXISTS ( SELECT * FROM t1 AS sq2
+ WHERE EXISTS ( SELECT * FROM t2 AS sq2
WHERE sq1.`pk` IN ( SELECT f1 FROM t1 ) AND sq2.f1 = sq1.f1 );
set optimizer_switch= @local_optimizer_switch;
-DROP TABLE t1;
+DROP TABLE t1,t2;
--echo #
--echo # MDEV-12145: IN subquery used in WHERE of EXISTS subquery
@@ -2248,9 +2255,9 @@ set @@optimizer_switch= @local_optimizer_switch;
drop table t1,t2,t3;
CREATE TABLE t1 ( id int NOT NULL, key(id));
-INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19);
+INSERT INTO t1 select seq from seq_11_to_39;
CREATE TABLE t2 (i1 int NOT NULL, i2 int NOT NULL);
-INSERT INTO t2 VALUES (11,11),(12,12),(13,13);
+INSERT INTO t2 select seq,seq+1 from seq_11_to_50;
CREATE VIEW v1 AS SELECT t2.i1 FROM t2 where t2.i1 = t2.i2;
explain SELECT 1 FROM t1 where t1.id IN (SELECT v1.i1 from v1);
SELECT 1 FROM t1 where t1.id IN (SELECT v1.i1 from v1);
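(Sketch of the sequence-engine pattern the test now depends on, hence the added have_sequence.inc guard: seq_11_to_39 yields the 29 rows 11..39 and seq_11_to_50 the 40 rows 11..50, which is what pushes the row estimates in the expected EXPLAIN output above. Table names t_demo/t_demo2 are hypothetical.)
--source include/have_sequence.inc
CREATE TABLE t_demo  (id int NOT NULL, KEY(id));
CREATE TABLE t_demo2 (i1 int NOT NULL, i2 int NOT NULL);
INSERT INTO t_demo  SELECT seq        FROM seq_11_to_39;   # 29 rows: 11..39
INSERT INTO t_demo2 SELECT seq, seq+1 FROM seq_11_to_50;   # 40 rows, i1 <> i2 for every row
DROP TABLE t_demo, t_demo2;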
diff --git a/mysql-test/main/subselect_sj_nonmerged.result b/mysql-test/main/subselect_sj_nonmerged.result
index a3e6c493930..2413ce3a557 100644
--- a/mysql-test/main/subselect_sj_nonmerged.result
+++ b/mysql-test/main/subselect_sj_nonmerged.result
@@ -47,8 +47,8 @@ id select_type table type possible_keys key key_len ref rows Extra
# Compare to this which really will have 50 record combinations:
explain select * from t3 where a in (select max(t2.a) from t1, t2 group by t2.b, t1.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index PRIMARY PRIMARY 8 NULL 100 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t3.a 1 Using where
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 50
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 8 <subquery2>.max(t2.a) 1 Using where; Using index
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary
2 MATERIALIZED t1 ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
SET @save_optimizer_switch=@@optimizer_switch;
@@ -57,8 +57,8 @@ SET optimizer_switch='outer_join_with_cache=off';
explain select * from t3
where a in (select max(t2.a) from t1 left join t2 on t1.a=t2.a group by t2.b, t1.b);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index PRIMARY PRIMARY 8 NULL 100 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t3.a 1 Using where
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 50
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 8 <subquery2>.max(t2.a) 1 Using where; Using index
2 MATERIALIZED t1 ALL NULL NULL NULL NULL 10 Using temporary
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using where
SET optimizer_switch=@save_optimizer_switch;
diff --git a/mysql-test/main/system_mysql_db_error_log.result b/mysql-test/main/system_mysql_db_error_log.result
index 5af3eda0700..c2129b7df31 100644
--- a/mysql-test/main/system_mysql_db_error_log.result
+++ b/mysql-test/main/system_mysql_db_error_log.result
@@ -110,7 +110,7 @@ host='localhost' and user='good_version_id_100500';
FLUSH PRIVILEGES;
SHOW GRANTS FOR good_version_id_100500@localhost;
Grants for good_version_id_100500@localhost
-GRANT SUPER, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, BINLOG ADMIN, BINLOG REPLAY ON *.* TO `good_version_id_100500`@`localhost`
+GRANT SUPER, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `good_version_id_100500`@`localhost`
DROP USER good_version_id_100500@localhost;
FOUND 1 /Warning.*'user' entry 'bad_access1@localhost' has a wrong 'access' value.*version_id=/ in system_mysql_db_error_log.err
FOUND 1 /Warning.*'user' entry 'bad_version_id_1000000@localhost' has a wrong 'version_id' value 1000000/ in system_mysql_db_error_log.err
diff --git a/mysql-test/main/table_elim.result b/mysql-test/main/table_elim.result
index 4da85c4a9ca..580b1cf5a0d 100644
--- a/mysql-test/main/table_elim.result
+++ b/mysql-test/main/table_elim.result
@@ -563,9 +563,9 @@ JOIN t5 ON t4.f3 ON t3.f1 = t5.f5 ON t2.f4 = t3.f4
WHERE t3.f2 ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 Using where
-1 SIMPLE t5 ref f5 f5 5 test.t3.f1 2 Using where; Using index
+1 SIMPLE t5 ref f5 f5 5 test.t3.f1 1 Using where; Using index
1 SIMPLE t4 ALL NULL NULL NULL NULL 3 Using where
-1 SIMPLE t2 ALL f4 NULL NULL NULL 11 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ref f4 f4 1003 test.t3.f4 1 Using where
# ^^ The above must not produce a QEP of t3,t5,t2,t4
# as that violates the "no interleaving of outer join nests" rule.
DROP TABLE t1,t2,t3,t4,t5;
@@ -737,13 +737,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -769,13 +772,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -788,12 +794,15 @@ EXPLAIN
"key_length": "8",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
+ "loops": 10,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(t1.a = v2b.b and trigcond(t1.a is not null))",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"filesort": {
"sort_key": "t11.a",
@@ -803,7 +812,9 @@ EXPLAIN
"table": {
"table_name": "t11",
"access_type": "ALL",
+ "loops": 1,
"rows": 1000,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -831,13 +842,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -857,13 +871,16 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"nested_loop": [
{
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -876,12 +893,15 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["b"],
"ref": ["test.t1.a"],
+ "loops": 10,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(trigcond(t1.a is not null))",
"materialized": {
"query_block": {
"select_id": 2,
+ "cost": "COST_REPLACED",
"const_condition": "1",
"filesort": {
"sort_key": "t11.a",
@@ -891,7 +911,9 @@ EXPLAIN
"table": {
"table_name": "t11",
"access_type": "ALL",
+ "loops": 1,
"rows": 1000,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -904,7 +926,9 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["pk"],
"ref": ["test.t11.b"],
+ "loops": 1000,
"rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"attached_condition": "trigcond(trigcond(t11.b is not null))"
}
@@ -946,7 +970,7 @@ group by yyy;
explain select t1.* from t1 left join v2e on v2e.yyy=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 10
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 Using where
2 DERIVED NULL NULL NULL NULL NULL NULL NULL no matching row in const table
create table t2 (a int, b int, c int);
insert into t2 select A.seq, B.seq, 123 from seq_1_to_3 A, seq_1_to_3 B;
@@ -956,14 +980,14 @@ explain select t1.* from t1 left join
(select a, count(*) as cnt from t2 group by a, b) D on D.a=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 10
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 1 Using where
2 DERIVED t2 ALL NULL NULL NULL NULL 9 Using temporary; Using filesort
# Still no elimination 'cause field D.b is just an alias for t2.a
explain select t1.* from t1 left join
(select a, a as b, count(*) as cnt from t2 group by a, b) D on D.a=t1.a and D.b=t1.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 10
-1 PRIMARY <derived2> ref key0 key0 10 test.t1.a,test.t1.b 2 Using where
+1 PRIMARY <derived2> ref key0 key0 10 test.t1.a,test.t1.b 1 Using where
2 DERIVED t2 ALL NULL NULL NULL NULL 9 Using temporary; Using filesort
Warnings:
Warning 1052 Column 'b' in group statement is ambiguous
diff --git a/mysql-test/main/table_elim.test b/mysql-test/main/table_elim.test
index d6bf925c2c0..26c56630784 100644
--- a/mysql-test/main/table_elim.test
+++ b/mysql-test/main/table_elim.test
@@ -680,6 +680,7 @@ group by t11.a;
explain select t1.* from t1 left join v2b on v2b.a=t1.a;
--echo # Check format JSON as well
+--source include/explain-no-costs.inc
explain format=JSON select t1.* from t1 left join v2b on t1.a=v2b.a;
--echo # Elimination of a whole subquery
@@ -692,6 +693,7 @@ explain select t1.* from t1 left join
explain select t1.* from t1 left join v2b on t1.a=v2b.b;
--echo # Check format JSON as well
+--source include/explain-no-costs.inc
explain format=JSON select t1.* from t1 left join v2b on t1.a=v2b.b;
create view v2c as
@@ -703,12 +705,14 @@ group by t11.a;
explain select t1.* from t1 left join v2c on v2c.a=t1.a;
--echo # Check format JSON as well
+--source include/explain-no-costs.inc
explain format=JSON select t1.* from t1 left join v2c on v2c.a=t1.a;
--echo # In this case v2c cannot be eliminated (since v2c.b is not unique)!
explain select t1.* from t1 left join v2c on t1.a=v2c.b;
--echo # Check format JSON as well
+--source include/explain-no-costs.inc
explain format=JSON select t1.* from t1 left join v2c on t1.a=v2c.b;
--echo # Create a view with multiple fields in the GROUP BY clause:
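(Sketch of the pattern added throughout table_elim.test above, assuming include/explain-no-costs.inc as sourced by the test: sourcing it immediately before each EXPLAIN FORMAT=JSON is what turns the new per-node "cost" members — visible as "COST_REPLACED" next to the added "loops" fields in table_elim.result — into a stable token, so the recorded JSON plans do not churn when cost constants change.)
--source include/explain-no-costs.inc
explain format=JSON select t1.* from t1 left join v2b on t1.a=v2b.a;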
diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result
index e6d973d53ee..86479c843c2 100644
--- a/mysql-test/main/table_value_constr.result
+++ b/mysql-test/main/table_value_constr.result
@@ -567,12 +567,12 @@ where t1.a=t2.a and st<3
select * from t2;
a b st
1 1 1
-1 2 2
1 1 2
-1 2 3
-1 2 3
1 1 3
1 1 3
+1 2 2
+1 2 3
+1 2 3
# recursive CTE that uses VALUES structure(s) : computation of factorial (first 10 elements)
with recursive fact(n,f) as
(
@@ -743,21 +743,19 @@ a b
explain extended select * from t1
where a in (values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
explain extended select * from t1
where a in (select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
# IN-subquery with VALUES structure(s) : UNION with VALUES on the first place
select * from t1
where a in (values (1) union select 2);
@@ -776,7 +774,7 @@ explain extended select * from t1
where a in (values (1) union select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union4,3> ALL NULL NULL NULL NULL NULL NULL
@@ -787,7 +785,7 @@ where a in (select * from (values (1)) as tvc_0 union
select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 4 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
@@ -812,7 +810,7 @@ where a in (select 2 union values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-4 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+4 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -823,7 +821,7 @@ select * from (values (1)) tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-3 DEPENDENT UNION <derived4> ref key0 key0 4 func 2 100.00
+3 DEPENDENT UNION <derived4> ref key0 key0 4 func 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -848,7 +846,7 @@ explain extended select * from t1
where a in (values (1) union all select b from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DEPENDENT UNION t1 ALL NULL NULL NULL NULL 6 100.00 Using where
Warnings:
@@ -858,7 +856,7 @@ where a in (select * from (values (1)) as tvc_0 union all
select b from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 4 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
4 DEPENDENT UNION t1 ALL NULL NULL NULL NULL 6 100.00 Using where
Warnings:
@@ -880,18 +878,18 @@ explain extended select * from t1
where a not in (values (1),(2));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
+3 DEPENDENT SUBQUERY <derived2> unique_subquery distinct_key distinct_key 4 func 1 100.00 Using where; Full scan on NULL key
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#3 */ select `tvc_0`.`1` from (values (1),(2)) `tvc_0` ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery3>`.`1`))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`) in <temporary table> on distinct_key where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`1`)))))
explain extended select * from t1
where a not in (select * from (values (1),(2)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 4 func 1 100.00 Using where; Full scan on NULL key
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,`test`.`t1`.`a` in ( <materialize> (/* select#2 */ select `tvc_0`.`1` from (values (1),(2)) `tvc_0` ), <primary_index_lookup>(`test`.`t1`.`a` in <temporary table> on distinct_key where `test`.`t1`.`a` = `<subquery2>`.`1`))))
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where !<expr_cache><`test`.`t1`.`a`>(<in_optimizer>(`test`.`t1`.`a`,<exists>(<primary_index_lookup>(<cache>(`test`.`t1`.`a`) in <temporary table> on distinct_key where trigcond(<cache>(`test`.`t1`.`a`) = `tvc_0`.`1`)))))
# NOT IN subquery with VALUES structure(s) : UNION with VALUES on the first place
select * from t1
where a not in (values (1) union select 2);
@@ -978,21 +976,19 @@ a b
explain extended select * from t1
where a = any (values (1),(2));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
explain extended select * from t1
where a = any (select * from (values (1),(2)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join ((values (1),(2)) `tvc_0`) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from (values (1),(2)) `tvc_0` join `test`.`t1` where `tvc_0`.`1` = `test`.`t1`.`a`
# ANY-subquery with VALUES structure(s) : UNION with VALUES on the first place
select * from t1
where a = any (values (1) union select 2);
@@ -1011,7 +1007,7 @@ explain extended select * from t1
where a = any (values (1) union select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union4,3> ALL NULL NULL NULL NULL NULL NULL
@@ -1022,7 +1018,7 @@ where a = any (select * from (values (1)) as tvc_0 union
select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 4 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
@@ -1047,7 +1043,7 @@ where a = any (select 2 union values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-4 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+4 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -1058,7 +1054,7 @@ select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-3 DEPENDENT UNION <derived4> ref key0 key0 4 func 2 100.00
+3 DEPENDENT UNION <derived4> ref key0 key0 4 func 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -1140,7 +1136,7 @@ where a = any (select 1 union values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-4 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+4 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -1151,7 +1147,7 @@ select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-3 DEPENDENT UNION <derived4> ref key0 key0 4 func 2 100.00
+3 DEPENDENT UNION <derived4> ref key0 key0 4 func 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -2688,9 +2684,9 @@ a
explain extended select a from t1 where a in (values (7) union values (8));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-5 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+5 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -2841,7 +2837,7 @@ id select_type table type possible_keys key key_len ref rows Extra
6 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2
2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
3 SUBQUERY t3 ALL NULL NULL NULL NULL 11 Using where
-3 SUBQUERY <derived5> ref key0 key0 8 test.t3.a 2 Using where; FirstMatch(t3)
+3 SUBQUERY <derived5> ref key0 key0 8 test.t3.a 1 Using where; FirstMatch(t3)
5 DERIVED t3 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
prepare stmt from "select
(values ((select * from t3 where a in (select * from v1))))";
@@ -2866,7 +2862,7 @@ id select_type table type possible_keys key key_len ref rows Extra
6 SUBQUERY <derived2> ALL NULL NULL NULL NULL 2
2 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
3 SUBQUERY t3 ALL NULL NULL NULL NULL 11 Using where
-3 SUBQUERY <derived5> ref key0 key0 8 test.t3.a 2 Using where; FirstMatch(t3)
+3 SUBQUERY <derived5> ref key0 key0 8 test.t3.a 1 Using where; FirstMatch(t3)
5 DERIVED t3 ALL NULL NULL NULL NULL 11 Using temporary; Using filesort
prepare stmt from "select
(values ((select * from t3
@@ -2951,6 +2947,8 @@ values ((values (4)), (select 5)), ((select 2), (values (8)));
values ((values (1) union values (1)));
(values (1) union values (1))
1
+values ((values (1) union all values (1)));
+ERROR 21000: Subquery returns more than 1 row
values ((values (1) union values (1) union values (1)));
(values (1) union values (1) union values (1))
1
diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test
index 331cfb8b7db..ade90400b63 100644
--- a/mysql-test/main/table_value_constr.test
+++ b/mysql-test/main/table_value_constr.test
@@ -422,6 +422,7 @@ select * from t2;
--echo # recursive CTE that uses VALUES structure(s) : that uses UNION ALL
+--sorted_result
with recursive t2(a,b,st) as
(
values(1,1,1)
@@ -832,6 +833,7 @@ deallocate prepare stmt1;
explain
values (1,2);
+--source include/explain-no-costs.inc
explain format=json
values (1,2);
@@ -852,16 +854,19 @@ values (5,6)
union
values (1,2),(3,4);
+--source include/explain-no-costs.inc
explain format=json
select 1,2
union
values (1,2),(3,4);
+--source include/explain-no-costs.inc
explain format=json
values (1,2),(3,4)
union
select 1,2;
+--source include/explain-no-costs.inc
explain format=json
values (5,6)
union
@@ -874,6 +879,7 @@ values (3,4)
union
values (1,2);
+--source include/explain-no-costs.inc
explain format=json
select 1,2
union
@@ -898,16 +904,19 @@ values (1,2)
union all
values (1,2),(3,4);
+--source include/explain-no-costs.inc
explain format=json
values (1,2),(3,4)
union all
select 1,2;
+--source include/explain-no-costs.inc
explain format=json
select 1,2
union
values (1,2),(3,4);
+--source include/explain-no-costs.inc
explain format=json
values (1,2)
union all
@@ -920,6 +929,7 @@ values (3,4)
union all
values (1,2);
+--source include/explain-no-costs.inc
explain format=json
select 1,2
union all
@@ -1574,6 +1584,9 @@ values ((values (4)), (select 5)), ((select 2), (values (8)));
values ((values (1) union values (1)));
+--error ER_SUBQUERY_NO_1_ROW
+values ((values (1) union all values (1)));
+
values ((values (1) union values (1) union values (1)));
values ((values ((values (4)))));
diff --git a/mysql-test/main/tmp_table_count-7586.result b/mysql-test/main/tmp_table_count-7586.result
index 637e7385685..ebb2333113f 100644
--- a/mysql-test/main/tmp_table_count-7586.result
+++ b/mysql-test/main/tmp_table_count-7586.result
@@ -52,6 +52,7 @@ Created_tmp_disk_tables 0
Created_tmp_files 0
Created_tmp_tables 2
drop table t3;
+set @@optimizer_switch="firstmatch=off";
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
@@ -69,6 +70,7 @@ Variable_name Value
Created_tmp_disk_tables 0
Created_tmp_files 0
Created_tmp_tables 1
+set @@optimizer_switch=default;
drop table t1,t2,t3;
truncate table performance_schema.events_statements_history_long;
flush status;
diff --git a/mysql-test/main/tmp_table_count-7586.test b/mysql-test/main/tmp_table_count-7586.test
index 0629e27f164..8fe9e3d2eff 100644
--- a/mysql-test/main/tmp_table_count-7586.test
+++ b/mysql-test/main/tmp_table_count-7586.test
@@ -47,6 +47,7 @@ select sum(created_tmp_tables) from performance_schema.events_statements_history
show status like '%Created_tmp%';
drop table t3;
+set @@optimizer_switch="firstmatch=off";
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a);
truncate table performance_schema.events_statements_history_long;
flush status;
@@ -54,6 +55,7 @@ CREATE TABLE t3 SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a);
--echo # Performance schema should be the same as "Created_tmp_tables" variable below
select sum(created_tmp_tables) from performance_schema.events_statements_history_long;
show status like '%Created_tmp%';
+set @@optimizer_switch=default;
drop table t1,t2,t3;
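(Sketch of the local override/restore pattern used in the hunks above, with t1/t2 as created in tmp_table_count-7586.test: firstmatch is switched off around the EXPLAIN and the CREATE ... SELECT so the IN-subquery keeps being materialized and the Created_tmp_tables counters under test stay meaningful. Saving into a user variable, as other tests in this patch do, is an equally valid alternative to "set @@optimizer_switch=default"; @sketch_switch is an illustrative name.)
set @sketch_switch= @@optimizer_switch;
set @@optimizer_switch= "firstmatch=off";
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a);
set @@optimizer_switch= @sketch_switch;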
diff --git a/mysql-test/main/trigger.result b/mysql-test/main/trigger.result
index 410c5a53ce2..b3ab5585d92 100644
--- a/mysql-test/main/trigger.result
+++ b/mysql-test/main/trigger.result
@@ -736,8 +736,6 @@ select user() into user;
set NEW.username = user;
select count(*) from ((select 1) union (select 2)) as d1 into i;
end|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
update t1 set data = 1;
connection addconroot1;
update t1 set data = 2;
@@ -2086,8 +2084,6 @@ FOR EACH ROW BEGIN
SELECT 1 FROM t1 c WHERE
(@bug51650 IS NULL OR @bug51650 != c.b) AND c.b = NEW.a LIMIT 1 INTO @foo;
END//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SET @bug51650 = 1;
INSERT IGNORE INTO t2 VALUES();
INSERT IGNORE INTO t1 SET b = '777';
diff --git a/mysql-test/main/trigger_notembedded.result b/mysql-test/main/trigger_notembedded.result
index 41547ac1e99..b6d878431f2 100644
--- a/mysql-test/main/trigger_notembedded.result
+++ b/mysql-test/main/trigger_notembedded.result
@@ -112,7 +112,7 @@ CREATE DEFINER='mysqltest_inv'@'localhost'
TRIGGER trg1 BEFORE INSERT ON t1
FOR EACH ROW
SET @new_sum = 0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection default;
use mysqltest_db1;
GRANT SET USER ON *.* TO mysqltest_dfn@localhost;
diff --git a/mysql-test/main/type_blob.result b/mysql-test/main/type_blob.result
index 851d0bd72d4..ccbbe5c9118 100644
--- a/mysql-test/main/type_blob.result
+++ b/mysql-test/main/type_blob.result
@@ -690,18 +690,18 @@ id txt
2 Chevy
select * from t1 where txt > 'Chevy';
id txt
+7 Ford
4 Honda
-5 Subaru
6 Honda
-7 Ford
+5 Subaru
select * from t1 where txt >= 'Chevy';
id txt
1 Chevy
2 Chevy
+7 Ford
4 Honda
-5 Subaru
6 Honda
-7 Ford
+5 Subaru
alter table t1 modify column txt blob;
explain select * from t1 where txt='Chevy' or txt is NULL;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/type_datetime.result b/mysql-test/main/type_datetime.result
index 3e864d0ffa9..96f3c568446 100644
--- a/mysql-test/main/type_datetime.result
+++ b/mysql-test/main/type_datetime.result
@@ -545,7 +545,7 @@ select * from t1
where id in (select id from t1 as x1 where (t1.cur_date is null));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY x1 ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary; End temporary
+1 PRIMARY x1 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(t1)
Warnings:
Note 1276 Field or reference 'test.t1.cur_date' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t1`.`id` AS `id`,`test`.`t1`.`cur_date` AS `cur_date` from `test`.`t1` semi join (`test`.`t1` `x1`) where `test`.`x1`.`id` = `test`.`t1`.`id` and `test`.`t1`.`cur_date` = 0
@@ -557,7 +557,7 @@ select * from t2
where id in (select id from t2 as x1 where (t2.cur_date is null));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 PRIMARY x1 ALL NULL NULL NULL NULL 2 100.00 Using where; Start temporary; End temporary
+1 PRIMARY x1 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(t2)
Warnings:
Note 1276 Field or reference 'test.t2.cur_date' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t2`.`id` AS `id`,`test`.`t2`.`cur_date` AS `cur_date` from `test`.`t2` semi join (`test`.`t2` `x1`) where `test`.`x1`.`id` = `test`.`t2`.`id` and `test`.`t2`.`cur_date` = 0
diff --git a/mysql-test/main/type_enum.result b/mysql-test/main/type_enum.result
index 9e313f5b302..a935e3d63cb 100644
--- a/mysql-test/main/type_enum.result
+++ b/mysql-test/main/type_enum.result
@@ -2377,7 +2377,7 @@ t2 CREATE TABLE `t2` (
DROP TABLE t2;
SELECT c_int FROM t1 UNION SELECT c_enum FROM t1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def c_int c_int 253 11 0 Y 0 0 8
+def c_int c_int 253 11 0 Y 16384 0 8
c_int
SELECT COALESCE(c_int, c_enum) FROM t1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
diff --git a/mysql-test/main/type_ranges.result b/mysql-test/main/type_ranges.result
index 02b6c79bdf5..012d1fc67ce 100644
--- a/mysql-test/main/type_ranges.result
+++ b/mysql-test/main/type_ranges.result
@@ -173,12 +173,12 @@ PRIMARY KEY (auto)
);
INSERT IGNORE INTO t2 (string,mediumblob_col,new_field) SELECT string,mediumblob_col,new_field from t1 where auto > 10;
Warnings:
+Warning 1265 Data truncated for column 'new_field' at row 1
Warning 1265 Data truncated for column 'new_field' at row 2
Warning 1265 Data truncated for column 'new_field' at row 3
Warning 1265 Data truncated for column 'new_field' at row 4
Warning 1265 Data truncated for column 'new_field' at row 5
Warning 1265 Data truncated for column 'new_field' at row 6
-Warning 1265 Data truncated for column 'new_field' at row 7
select * from t2;
auto string mediumblob_col new_field
1 2 2 ne
diff --git a/mysql-test/main/type_set.result b/mysql-test/main/type_set.result
index 571e0d36eaf..5821bbae984 100644
--- a/mysql-test/main/type_set.result
+++ b/mysql-test/main/type_set.result
@@ -403,7 +403,7 @@ t2 CREATE TABLE `t2` (
DROP TABLE t2;
SELECT c_int FROM t1 UNION SELECT c_set FROM t1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
-def c_int c_int 253 33 0 Y 0 0 33
+def c_int c_int 253 33 0 Y 16384 0 33
c_int
SELECT COALESCE(c_int, c_set) FROM t1;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
diff --git a/mysql-test/main/type_time_6065.result b/mysql-test/main/type_time_6065.result
index 56de96870b6..e0014ea75d9 100644
--- a/mysql-test/main/type_time_6065.result
+++ b/mysql-test/main/type_time_6065.result
@@ -220,7 +220,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key = col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00 Using where
-1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 2 100.00 Using where; Using index
+1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 1 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` = `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -241,7 +241,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key = col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00 Using where
-1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 2 100.00 Using where; Using index
+1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 1 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` = `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -304,7 +304,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key = col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using where; Using index
-1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 2 100.00 Using where; Using index
+1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 1 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` = `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -325,7 +325,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key = col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using where; Using index
-1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 2 100.00 Using where; Using index
+1 SIMPLE t1 ref col_time_key col_time_key 4 test.t2.col_datetime_key 1 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` = `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -408,7 +408,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key >= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` >= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -439,7 +439,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key >= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` >= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -532,7 +532,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key >= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` >= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -563,7 +563,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key >= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` >= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -656,7 +656,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key >= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` >= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -687,7 +687,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key >= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` >= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -780,7 +780,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key >= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` >= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -811,7 +811,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key >= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` >= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -894,7 +894,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key > col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` > `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -920,7 +920,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key > col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` > `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -998,7 +998,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key > col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` > `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1024,7 +1024,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key > col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` > `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1102,7 +1102,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key > col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` > `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1128,7 +1128,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key > col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` > `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1206,7 +1206,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key > col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` > `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1232,7 +1232,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key > col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` > `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1320,7 +1320,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key <= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` <= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1351,7 +1351,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key <= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` <= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1444,7 +1444,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key <= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` <= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1475,7 +1475,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key <= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` <= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1568,7 +1568,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key <= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` <= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1599,7 +1599,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key <= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` <= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1692,7 +1692,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key <= col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` <= `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1723,7 +1723,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key <= col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` <= `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1806,7 +1806,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key < col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` < `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1832,7 +1832,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key < col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` IGNORE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` < `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -1910,7 +1910,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_time_key < col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t1`.`col_time_key` < `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -1936,7 +1936,7 @@ t2 force INDEX (col_datetime_key)
WHERE col_datetime_key < col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Using index
-1 SIMPLE t2 ALL col_datetime_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t1`.`col_time_key` AS `col_time_key`,`test`.`t2`.`col_datetime_key` AS `col_datetime_key` from `test`.`t1` FORCE INDEX (`col_time_key`) straight_join `test`.`t2` FORCE INDEX (`col_datetime_key`) where `test`.`t2`.`col_datetime_key` < `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -2014,7 +2014,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key < col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` < `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -2040,7 +2040,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key < col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` IGNORE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` < `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -2118,7 +2118,7 @@ t1 force INDEX (col_time_key)
WHERE col_time_key < col_datetime_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t1`.`col_time_key` < `test`.`t2`.`col_datetime_key`
SELECT * FROM
@@ -2144,7 +2144,7 @@ t1 force INDEX (col_time_key)
WHERE col_datetime_key < col_time_key;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index col_datetime_key col_datetime_key 6 NULL 5 100.00 Using index
-1 SIMPLE t1 ALL col_time_key NULL NULL NULL 5 100.00 Range checked for each record (index map: 0x1)
+1 SIMPLE t1 index col_time_key col_time_key 4 NULL 5 100.00 Range checked for each record (index map: 0x1); Using index
Warnings:
Note 1003 select `test`.`t2`.`col_datetime_key` AS `col_datetime_key`,`test`.`t1`.`col_time_key` AS `col_time_key` from `test`.`t2` FORCE INDEX (`col_datetime_key`) straight_join `test`.`t1` FORCE INDEX (`col_time_key`) where `test`.`t2`.`col_datetime_key` < `test`.`t1`.`col_time_key`
SELECT * FROM
@@ -2267,9 +2267,8 @@ outr.col_varchar_key IS NULL
);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY outr system col_datetime_key NULL NULL NULL 1 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
+1 PRIMARY innr ref col_int_key col_int_key 4 const 2 50.00 Using where; FirstMatch(outr)
1 PRIMARY outr2 index col_time_key col_time_key 4 NULL 20 100.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 MATERIALIZED innr ref col_int_key col_int_key 4 const 2 100.00 Using where
Warnings:
Note 1003 select 1 AS `col_int_nokey` from `test`.`t3` `outr2` semi join (`test`.`t1` `innr`) where `test`.`innr`.`col_int_key` = 1 and `test`.`innr`.`pk` >= `test`.`innr`.`col_int_nokey` and `test`.`outr2`.`col_time_key` > '2001-11-04 19:07:55'
SELECT outr.col_int_nokey
diff --git a/mysql-test/main/type_timestamp.result b/mysql-test/main/type_timestamp.result
index a4516fc91a2..a64d393ee66 100644
--- a/mysql-test/main/type_timestamp.result
+++ b/mysql-test/main/type_timestamp.result
@@ -1348,6 +1348,8 @@ SET time_zone=DEFAULT;
# MDEV-29225 make explicit_defaults_for_timestamps SESSION variable
#
set explicit_defaults_for_timestamp=OFF;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
create table t1 (f1 timestamp, f2 timestamp);
show create table t1;
Table Create Table
diff --git a/mysql-test/main/union.result b/mysql-test/main/union.result
index aab13d191a4..797abaf9de5 100644
--- a/mysql-test/main/union.result
+++ b/mysql-test/main/union.result
@@ -1541,8 +1541,6 @@ NULL
(select 2) union (select 1 into @var);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @var)' at line 1
(select 1) union (select 1) into @var;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
(select 2) union (select 1) into @var;
ERROR 42000: Result consisted of more than one row
CREATE TABLE t1 (a int);
@@ -1675,14 +1673,8 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
SELECT a FROM t1 UNION SELECT a INTO OUTFILE 'union.out.file6' FROM t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1
SELECT a FROM t1 UNION SELECT a FROM t1 INTO @v ;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a FROM t1 UNION SELECT a FROM t1 INTO OUTFILE 'union.out.file5';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a FROM t1 UNION SELECT a FROM t1 INTO OUTFILE 'union.out.file6';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a INTO @v FROM t1 UNION SELECT a FROM t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT a FROM t1' at line 1
SELECT a INTO OUTFILE 'union.out.file7' FROM t1 UNION SELECT a FROM t1;
@@ -2783,3 +2775,70 @@ drop table t1;
#
# End of 10.3 tests
#
+
+# check union all & union distinct
+
+select 1 as res union select 2 union select 1 union select 2;
+res
+1
+2
+select 1 as res union distinct select 2 union distinct select 1 union distinct select 2;
+res
+1
+2
+select 1 as res union all select 2 union all select 1 union all select 2;
+res
+1
+2
+1
+2
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union select 2;
+res
+1
+2
+3
+4
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union distinct select 2;
+res
+1
+2
+3
+4
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union all select 2;
+res
+1
+2
+2
+3
+3
+4
+2
+select 1 as res union all select 2 union distinct select 1 union all select 2;
+res
+1
+2
+2
+select 1 as res union select 2 union all select 1 union distinct select 3;
+res
+1
+2
+3
+select 1 as res union select 2 union all select 1 union distinct select 3 union all select 2;
+res
+1
+2
+3
+2
+select 1 as res union select 2 union all select 1 union distinct select 3 union all select 2 union distinct select 5;
+res
+1
+2
+3
+5
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union all select 2 union all select 1 union distinct select 3 union all select 2;
+res
+1
+2
+3
+4
+2
diff --git a/mysql-test/main/union.test b/mysql-test/main/union.test
index 4cebdcff9bb..98c36e0404a 100644
--- a/mysql-test/main/union.test
+++ b/mysql-test/main/union.test
@@ -1,6 +1,8 @@
#
# Test of unions
#
+--source include/have_sequence.inc
+
CREATE TABLE t1 (a int not null, b char (10) not null);
insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c');
@@ -2010,3 +2012,21 @@ drop table t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo
+--echo # check union all & union distinct
+--echo
+
+select 1 as res union select 2 union select 1 union select 2;
+select 1 as res union distinct select 2 union distinct select 1 union distinct select 2;
+select 1 as res union all select 2 union all select 1 union all select 2;
+
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union select 2;
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union distinct select 2;
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union all select 2;
+
+select 1 as res union all select 2 union distinct select 1 union all select 2;
+select 1 as res union select 2 union all select 1 union distinct select 3;
+select 1 as res union select 2 union all select 1 union distinct select 3 union all select 2;
+select 1 as res union select 2 union all select 1 union distinct select 3 union all select 2 union distinct select 5;
+select truncate(seq/2,0)+1 as res from seq_1_to_6 union all select 2 union all select 1 union distinct select 3 union all select 2;
diff --git a/mysql-test/main/update_use_source.result b/mysql-test/main/update_use_source.result
index 5a9e0a7750c..d61bff04e0d 100644
--- a/mysql-test/main/update_use_source.result
+++ b/mysql-test/main/update_use_source.result
@@ -76,8 +76,7 @@ rollback;
explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED a ALL NULL NULL NULL NULL 8
+1 PRIMARY a ALL NULL NULL NULL NULL 8 Using where; FirstMatch(t1)
start transaction;
update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3;
affected rows: 4
@@ -318,8 +317,7 @@ rollback;
explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 range t1_c2 t1_c2 5 NULL 2 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED a range t1_c2 t1_c2 5 NULL 2 Using where; Using index
+1 PRIMARY a ref t1_c2 t1_c2 5 test.t1.c2 8 Using index; FirstMatch(t1)
start transaction;
update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3;
affected rows: 4
@@ -559,9 +557,8 @@ rollback;
#
explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range t1_c2 t1_c2 5 NULL 2 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED a range t1_c2 t1_c2 5 NULL 2 Using where; Using index
+1 PRIMARY a index t1_c2 t1_c2 10 NULL 8 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 1
start transaction;
update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3;
affected rows: 4
@@ -802,9 +799,8 @@ rollback;
#
explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range t1_c2 t1_c2 5 NULL 2 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED a range t1_c2 t1_c2 5 NULL 2 Using where; Using index
+1 PRIMARY a index t1_c2 t1_c2 10 NULL 8 Using where; Using index; LooseScan
+1 PRIMARY t1 ref t1_c2 t1_c2 5 test.a.c2 1
start transaction;
update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3;
affected rows: 4
diff --git a/mysql-test/main/upgrade_MDEV-19650.test b/mysql-test/main/upgrade_MDEV-19650.test
index a2f9fee705f..d4bd64ce857 100644
--- a/mysql-test/main/upgrade_MDEV-19650.test
+++ b/mysql-test/main/upgrade_MDEV-19650.test
@@ -101,8 +101,8 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Run mysql_upgrade
--exec $MYSQL_UPGRADE 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo # check new definitions mysql_upgrade
diff --git a/mysql-test/main/upgrade_MDEV-23102-1.test b/mysql-test/main/upgrade_MDEV-23102-1.test
index 172e0d595b2..b70230ebdf9 100644
--- a/mysql-test/main/upgrade_MDEV-23102-1.test
+++ b/mysql-test/main/upgrade_MDEV-23102-1.test
@@ -115,8 +115,8 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Run mysql_upgrade
--exec $MYSQL_UPGRADE 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo # check new definitions mysql_upgrade
@@ -138,8 +138,8 @@ DROP VIEW mysql.user;
DROP PROCEDURE AddGeometryColumn;
DROP PROCEDURE DropGeometryColumn;
--exec $MYSQL_UPGRADE 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
delete from global_priv;
delete from tables_priv;
diff --git a/mysql-test/main/upgrade_MDEV-23102-2.test b/mysql-test/main/upgrade_MDEV-23102-2.test
index f2d7ac578e0..d55ebe03d29 100644
--- a/mysql-test/main/upgrade_MDEV-23102-2.test
+++ b/mysql-test/main/upgrade_MDEV-23102-2.test
@@ -96,8 +96,8 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Run mysql_upgrade
--exec $MYSQL_UPGRADE 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo # check new definitions mysql_upgrade
@@ -116,8 +116,8 @@ select count(*) from global_priv where user='mariadb.sys' and host='localhost';
DROP USER 'superuser'@'localhost';
DROP VIEW mysql.user;
--exec $MYSQL_UPGRADE 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
delete from global_priv;
delete from tables_priv;
diff --git a/mysql-test/main/upgrade_geometrycolumn_procedure_definer.test b/mysql-test/main/upgrade_geometrycolumn_procedure_definer.test
index 5111fcdf4ea..7ef8b4c02fc 100644
--- a/mysql-test/main/upgrade_geometrycolumn_procedure_definer.test
+++ b/mysql-test/main/upgrade_geometrycolumn_procedure_definer.test
@@ -43,8 +43,8 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Run mysql_upgrade
--echo #
--exec $MYSQL_UPGRADE 2>&1
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
--echo #
--echo # check new definers of Add/DropGeometryColumn
diff --git a/mysql-test/main/upgrade_mdev_24363.test b/mysql-test/main/upgrade_mdev_24363.test
index c188d4c7fac..d08ffa3b2a6 100644
--- a/mysql-test/main/upgrade_mdev_24363.test
+++ b/mysql-test/main/upgrade_mdev_24363.test
@@ -55,8 +55,8 @@ drop user gigi@localhost;
--echo # Run mysql_upgrade
--exec $MYSQL_UPGRADE 2>&1
let $MYSQLD_DATADIR= `select @@datadir`;
---file_exists $MYSQLD_DATADIR/mysql_upgrade_info
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--file_exists $MYSQLD_DATADIR/mariadb_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
create user gigi@localhost;
show create user gigi@localhost;
diff --git a/mysql-test/main/userstat.result b/mysql-test/main/userstat.result
index 5315317e33a..eec7910aae8 100644
--- a/mysql-test/main/userstat.result
+++ b/mysql-test/main/userstat.result
@@ -1,6 +1,4 @@
select variable_value from information_schema.global_status where variable_name="handler_read_key" into @global_read_key;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show columns from information_schema.client_statistics;
Field Type Null Key Default Extra
CLIENT varchar(64) NO NULL
diff --git a/mysql-test/main/view.result b/mysql-test/main/view.result
index aec4a5d09f2..b5c5564a7da 100644
--- a/mysql-test/main/view.result
+++ b/mysql-test/main/view.result
@@ -2445,8 +2445,6 @@ SELECT Meaning FROM v1 INTO retn;
RETURN retn;
END
//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE VIEW v2 AS SELECT f1();
select * from v2;
f1()
@@ -2618,8 +2616,6 @@ declare mx int;
select max(a) from t1 into mx;
return mx;
end//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create view v1 as select f1() as a;
create view v2 as select * from v1;
drop table t1;
@@ -3162,14 +3158,10 @@ DROP VIEW v1;
DROP TABLE t1;
DROP VIEW IF EXISTS v1;
SELECT * FROM (SELECT 1) AS t into @w;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE VIEW v1 AS SELECT * FROM (SELECT 1) AS t into @w;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @w' at line 1
# Previously the following would fail.
SELECT * FROM (SELECT 1) AS t into @w;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
drop view if exists view_24532_a;
drop view if exists view_24532_b;
drop table if exists table_24532;
@@ -3966,8 +3958,6 @@ BEGIN
SELECT a FROM v2 INTO @a;
RETURN @a;
END//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Trigger pre-locking when opening v2.
CREATE VIEW v1 AS SELECT f1() FROM t1;
SHOW CREATE VIEW v1;
@@ -4213,10 +4203,10 @@ INSERT INTO t2 VALUES
EXPLAIN EXTENDED
SELECT t1.a,t2.c FROM t1,t2 WHERE t2.pk = t1.a AND t2.pk > 8;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 16 100.00 Using where
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00
+1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 12 75.00 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 16 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`pk` = `test`.`t1`.`a` and `test`.`t1`.`a` > 8
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`pk` and `test`.`t2`.`pk` > 8
FLUSH STATUS;
SELECT t1.a,t2.c FROM t1,t2 WHERE t2.pk = t1.a AND t2.pk > 8;
a c
@@ -4225,22 +4215,34 @@ a c
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 1
+Handler_read_key 0
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
-Handler_read_rnd_next 17
+Handler_read_rnd_next 30
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+explain extended SELECT t1.a,t2.c FROM t1,t2 WHERE t2.pk = t1.a AND t2.pk > 8;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 16 12.50 Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`pk` = `test`.`t1`.`a` and `test`.`t1`.`a` > 8
CREATE VIEW v AS SELECT * FROM t2;
EXPLAIN EXTENDED
SELECT t1.a,v.c FROM t1,v WHERE v.pk = t1.a AND v.pk > 8;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 16 100.00 Using where
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00
+1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 12 75.00 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 16 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
-Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`pk` = `test`.`t1`.`a` and `test`.`t1`.`a` > 8
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`pk` and `test`.`t2`.`pk` > 8
FLUSH STATUS;
SELECT t1.a,v.c FROM t1,v WHERE v.pk = t1.a AND v.pk > 8;
a c
@@ -4249,14 +4251,20 @@ a c
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 1
+Handler_read_key 0
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
-Handler_read_rnd_next 17
+Handler_read_rnd_next 30
+set statement optimizer_where_cost=100 FOR explain extended SELECT t1.a,v.c FROM t1,v WHERE v.pk = t1.a AND v.pk > 8;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 16 100.00 Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`pk` = `test`.`t1`.`a` and `test`.`t1`.`a` > 8
DROP VIEW v;
DROP TABLE t1, t2;
#
@@ -5481,8 +5489,8 @@ from t1 left join v1 on v1.c=t1.b
where t1.a < 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 3 100.00 Using index condition
-1 SIMPLE t2 ref c c 5 test.t1.b 2 100.00 Using where
-1 SIMPLE t3 ref f,e e 5 test.t2.d 2 100.00 Using where
+1 SIMPLE t2 ref c c 5 test.t1.b 1 100.00 Using where
+1 SIMPLE t3 ref f,e e 5 test.t2.d 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d`,`test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`c` = `test`.`t1`.`b` and `test`.`t3`.`e` = `test`.`t2`.`d` and `test`.`t3`.`f` is not null and `test`.`t1`.`b` is not null and `test`.`t2`.`d` is not null) where `test`.`t1`.`a` < 5
explain extended
@@ -5492,8 +5500,8 @@ on t2.c=t1.b and t3.f is not null
where t1.a < 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 3 100.00 Using index condition
-1 SIMPLE t2 ref c c 5 test.t1.b 2 100.00 Using where
-1 SIMPLE t3 ref f,e e 5 test.t2.d 2 100.00 Using where
+1 SIMPLE t2 ref c c 5 test.t1.b 1 100.00 Using where
+1 SIMPLE t3 ref f,e e 5 test.t2.d 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d`,`test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`c` = `test`.`t1`.`b` and `test`.`t3`.`e` = `test`.`t2`.`d` and `test`.`t3`.`f` is not null and `test`.`t1`.`b` is not null and `test`.`t2`.`d` is not null) where `test`.`t1`.`a` < 5
explain extended
@@ -5503,7 +5511,7 @@ where t1.a < 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 3 100.00 Using index condition
1 SIMPLE t3 eq_ref f,e f 4 test.t1.a 1 100.00 Using where
-1 SIMPLE t2 ref c c 5 test.t1.b 2 100.00 Using where
+1 SIMPLE t2 ref c c 5 test.t1.b 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d`,`test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`c` = `test`.`t1`.`b` and `test`.`t3`.`f` = `test`.`t1`.`a` and `test`.`t2`.`d` = `test`.`t3`.`e` and `test`.`t1`.`a` is not null and `test`.`t1`.`a` is not null and `test`.`t1`.`b` is not null) where `test`.`t1`.`a` < 5
explain extended
@@ -5514,7 +5522,7 @@ where t1.a < 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 3 100.00 Using index condition
1 SIMPLE t3 eq_ref f,e f 4 test.t1.a 1 100.00 Using where
-1 SIMPLE t2 ref c c 5 test.t1.b 2 100.00 Using where
+1 SIMPLE t2 ref c c 5 test.t1.b 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d`,`test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t1` left join (`test`.`t2` join `test`.`t3`) on(`test`.`t2`.`c` = `test`.`t1`.`b` and `test`.`t3`.`f` = `test`.`t1`.`a` and `test`.`t2`.`d` = `test`.`t3`.`e` and `test`.`t1`.`a` is not null and `test`.`t1`.`a` is not null and `test`.`t1`.`b` is not null) where `test`.`t1`.`a` < 5
drop view v1;
diff --git a/mysql-test/main/view.test b/mysql-test/main/view.test
index c6cc9a69f93..3b3d1124a09 100644
--- a/mysql-test/main/view.test
+++ b/mysql-test/main/view.test
@@ -1093,10 +1093,11 @@ insert into t1 values (1);
update v2 set s1 = 1;
select * from v2;
select * from t2;
-# scheck how VIEWs with subqueries work with prepared statements
+# check how VIEWs with subqueries work with prepared statements
prepare stmt1 from "select * from v2;";
execute stmt1;
insert into t1 values (0);
+--sorted_result
execute stmt1;
deallocate prepare stmt1;
drop view v2;
@@ -1880,7 +1881,9 @@ CREATE VIEW v1 AS SELECT id, f FROM t1 WHERE id <= 2;
INSERT INTO t1 VALUES (2, 'foo2');
INSERT INTO t1 VALUES (1, 'foo1');
+--sorted_result
SELECT * FROM v1;
+--sorted_result
SELECT * FROM v1;
DROP VIEW v1;
@@ -4162,12 +4165,18 @@ FLUSH STATUS;
SELECT t1.a,t2.c FROM t1,t2 WHERE t2.pk = t1.a AND t2.pk > 8;
SHOW STATUS LIKE 'Handler_read_%';
+analyze table t1,t2;
+explain extended SELECT t1.a,t2.c FROM t1,t2 WHERE t2.pk = t1.a AND t2.pk > 8;
+
CREATE VIEW v AS SELECT * FROM t2;
EXPLAIN EXTENDED
SELECT t1.a,v.c FROM t1,v WHERE v.pk = t1.a AND v.pk > 8;
FLUSH STATUS;
SELECT t1.a,v.c FROM t1,v WHERE v.pk = t1.a AND v.pk > 8;
SHOW STATUS LIKE 'Handler_read_%';
+
+set statement optimizer_where_cost=100 FOR explain extended SELECT t1.a,v.c FROM t1,v WHERE v.pk = t1.a AND v.pk > 8;
+
DROP VIEW v;
DROP TABLE t1, t2;
diff --git a/mysql-test/main/view_grant.result b/mysql-test/main/view_grant.result
index 3754a55eea8..1c720f215f8 100644
--- a/mysql-test/main/view_grant.result
+++ b/mysql-test/main/view_grant.result
@@ -22,7 +22,7 @@ grant create view,select on test.* to mysqltest_1@localhost;
connect user1,localhost,mysqltest_1,,test;
connection user1;
create definer=root@localhost view v1 as select * from mysqltest.t1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
create view v1 as select * from mysqltest.t1;
alter view v1 as select * from mysqltest.t1;
ERROR 42000: DROP command denied to user 'mysqltest_1'@'localhost' for table `test`.`v1`
@@ -415,8 +415,6 @@ create table t2 (s1 int);
drop function if exists f2;
create function f2 () returns int begin declare v int; select s1 from t2
into v; return v; end//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create algorithm=TEMPTABLE view v1 as select f2() from t1;
create algorithm=MERGE view v2 as select f2() from t1;
create algorithm=TEMPTABLE SQL SECURITY INVOKER view v3 as select f2() from t1;
@@ -459,8 +457,6 @@ create table t2 (s1 int);
drop function if exists f2;
create function f2 () returns int begin declare v int; select s1 from t2
into v; return v; end//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create user mysqltest_1@localhost;
grant select on t1 to mysqltest_1@localhost;
grant execute on function f2 to mysqltest_1@localhost;
@@ -931,7 +927,7 @@ ERROR 42000: CREATE VIEW command denied to user 'u26813'@'localhost' for table `
ALTER VIEW v2 AS SELECT f2 FROM t1;
ERROR 42000: DROP command denied to user 'u26813'@'localhost' for table `db26813`.`v2`
ALTER VIEW v3 AS SELECT f2 FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection root;
SHOW CREATE VIEW v3;
View Create View character_set_client collation_connection
@@ -959,9 +955,9 @@ GRANT SELECT, DROP, CREATE VIEW, SHOW VIEW ON mysqltest_29908.v2 TO u29908_2@loc
GRANT SELECT ON mysqltest_29908.t1 TO u29908_2@localhost;
connect u2,localhost,u29908_2,,mysqltest_29908;
ALTER VIEW v1 AS SELECT f2 FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
ALTER VIEW v2 AS SELECT f2 FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
SHOW CREATE VIEW v2;
View Create View character_set_client collation_connection
v2 CREATE ALGORITHM=UNDEFINED DEFINER=`u29908_1`@`localhost` SQL SECURITY INVOKER VIEW `v2` AS select `t1`.`f1` AS `f1` from `t1` latin1 latin1_swedish_ci
diff --git a/mysql-test/main/win.result b/mysql-test/main/win.result
index 21f2dbccd33..e8d24994140 100644
--- a/mysql-test/main/win.result
+++ b/mysql-test/main/win.result
@@ -1402,6 +1402,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1416,7 +1417,9 @@ EXPLAIN
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1437,6 +1440,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"window_functions_computation": {
@@ -1453,7 +1457,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1474,6 +1480,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1488,7 +1495,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1511,6 +1520,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "MX in (3,5,7)",
"filesort": {
"sort_key": "t1.b",
@@ -1528,7 +1538,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1665,6 +1677,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1679,7 +1692,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1697,6 +1712,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1711,7 +1727,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1729,6 +1747,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1743,7 +1762,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1761,6 +1782,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1775,7 +1797,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1821,6 +1845,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "row_number() over ( order by t1.s1,t1.s2) desc",
"window_functions_computation": {
@@ -1837,7 +1862,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1981,6 +2008,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"duplicate_removal": {
"window_functions_computation": {
"sorts": [
@@ -1996,7 +2024,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2161,6 +2191,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -2175,7 +2206,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2228,6 +2261,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -2242,7 +2276,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3827,6 +3863,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"window_functions_computation": {
@@ -3849,9 +3886,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
diff --git a/mysql-test/main/win.test b/mysql-test/main/win.test
index 0cc929ce46b..480eca274cf 100644
--- a/mysql-test/main/win.test
+++ b/mysql-test/main/win.test
@@ -927,11 +927,13 @@ drop table t0,t1;
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+--source include/explain-no-costs.inc
explain format=json select rank() over (order by a) from t0;
create table t1 (a int, b int, c int);
insert into t1 select a,a,a from t0;
+--source include/explain-no-costs.inc
explain format=json
select
a,
@@ -939,6 +941,7 @@ select
from t1
group by a;
+--source include/explain-no-costs.inc
explain format=json
select
a,
@@ -952,6 +955,7 @@ order by null;
--echo #
select b,max(a) as MX, rank() over (order by b) from t1 group by b having MX in (3,5,7);
+--source include/explain-no-costs.inc
explain format=json
select b,max(a) as MX, rank() over (order by b) from t1 group by b having MX in (3,5,7);
@@ -1040,24 +1044,28 @@ from t1;
show status like '%sort%';
# Check using EXPLAIN FORMAT=JSON
+--source include/explain-no-costs.inc
explain format=json
select
rank() over (partition by c order by a),
rank() over (partition by c order by a)
from t1;
+--source include/explain-no-costs.inc
explain format=json
select
rank() over (order by a),
row_number() over (order by a)
from t1;
+--source include/explain-no-costs.inc
explain format=json
select
rank() over (partition by c order by a),
count(*) over (partition by c)
from t1;
+--source include/explain-no-costs.inc
explain format=json
select
count(*) over (partition by c),
@@ -1089,6 +1097,7 @@ insert into t1 values (null,'a');
insert into t1 values (2,'b');
insert into t1 values (-1,'');
+--source include/explain-no-costs.inc
explain format=json
select *, row_number() over (order by s1, s2) as X from t1 order by X desc;
select *, row_number() over (order by s1, s2) as X from t1 order by X desc;
@@ -1177,6 +1186,7 @@ insert into t1 values
select rank() over (partition by part_id order by a) from t1;
select distinct rank() over (partition by part_id order by a) from t1;
+--source include/explain-no-costs.inc
explain format=json
select distinct rank() over (partition by part_id order by a) from t1;
@@ -1301,6 +1311,7 @@ select pk, a, d,
sum(d) over (order by a
ROWS BETWEEN 1 preceding and 2 following) as sum_2
from t1;
+--source include/explain-no-costs.inc
explain format=json
select pk, a, d,
sum(d) over (partition by a order by pk
@@ -1336,6 +1347,7 @@ insert into t1 values
(10, 5, 1000),
(10, 1, 100);
+--source include/explain-no-costs.inc
explain format=json
select
a,b,c,
diff --git a/mysql-test/main/win_empty_over.result b/mysql-test/main/win_empty_over.result
index 4fa53bb4eae..2f75f80c6ad 100644
--- a/mysql-test/main/win_empty_over.result
+++ b/mysql-test/main/win_empty_over.result
@@ -36,6 +36,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -53,7 +54,9 @@ EXPLAIN
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["pk"],
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index": true
}
@@ -68,6 +71,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -85,7 +89,9 @@ EXPLAIN
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["pk"],
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100,
"using_index": true
}
diff --git a/mysql-test/main/win_empty_over.test b/mysql-test/main/win_empty_over.test
index 91344d76865..dff15a41e30 100644
--- a/mysql-test/main/win_empty_over.test
+++ b/mysql-test/main/win_empty_over.test
@@ -21,7 +21,9 @@ insert into t1 values
(11, 2, 10, NULL, 0.9, NULL);
select pk, row_number() over () from t1;
+--source include/explain-no-costs.inc
explain FORMAT=JSON select pk, row_number() over () from t1;
+--source include/explain-no-costs.inc
explain FORMAT=JSON select row_number() over (), pk from t1;
select row_number() over () from (select 4) as t;
diff --git a/mysql-test/main/xtradb_mrr.result b/mysql-test/main/xtradb_mrr.result
index b4c91d9aff4..7d84f605e14 100644
--- a/mysql-test/main/xtradb_mrr.result
+++ b/mysql-test/main/xtradb_mrr.result
@@ -431,6 +431,12 @@ INSERT INTO `t1` VALUES (97,7,0,'z','z');
INSERT INTO `t1` VALUES (98,1,1,'j','j');
INSERT INTO `t1` VALUES (99,7,8,'c','c');
INSERT INTO `t1` VALUES (100,2,5,'f','f');
+EXPLAIN SELECT table1 .`col_varchar_key`
+FROM t1 table1 STRAIGHT_JOIN ( t1 table3 JOIN t1 table4 ON table4 .`pk` = table3 .`col_int_nokey` ) ON table4 .`col_varchar_nokey` ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE table1 index NULL col_varchar_key 9 NULL # Using index
+1 SIMPLE table3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
+1 SIMPLE table4 eq_ref PRIMARY PRIMARY 4 test.table3.col_int_nokey # Using where
SELECT table1 .`col_varchar_key`
FROM t1 table1 STRAIGHT_JOIN ( t1 table3 JOIN t1 table4 ON table4 .`pk` = table3 .`col_int_nokey` ) ON table4 .`col_varchar_nokey` ;
col_varchar_key
diff --git a/mysql-test/main/xtradb_mrr.test b/mysql-test/main/xtradb_mrr.test
index 9de9b192b06..f5cecc54a3f 100644
--- a/mysql-test/main/xtradb_mrr.test
+++ b/mysql-test/main/xtradb_mrr.test
@@ -151,6 +151,9 @@ INSERT INTO `t1` VALUES (97,7,0,'z','z');
INSERT INTO `t1` VALUES (98,1,1,'j','j');
INSERT INTO `t1` VALUES (99,7,8,'c','c');
INSERT INTO `t1` VALUES (100,2,5,'f','f');
+--replace_column 9 #
+EXPLAIN SELECT table1 .`col_varchar_key`
+FROM t1 table1 STRAIGHT_JOIN ( t1 table3 JOIN t1 table4 ON table4 .`pk` = table3 .`col_int_nokey` ) ON table4 .`col_varchar_nokey` ;
SELECT table1 .`col_varchar_key`
FROM t1 table1 STRAIGHT_JOIN ( t1 table3 JOIN t1 table4 ON table4 .`pk` = table3 .`col_int_nokey` ) ON table4 .`col_varchar_nokey` ;
DROP TABLE t1;
diff --git a/mysql-test/mariadb-test-run.pl b/mysql-test/mariadb-test-run.pl
index b9e338a4b8d..65c17801b16 100755
--- a/mysql-test/mariadb-test-run.pl
+++ b/mysql-test/mariadb-test-run.pl
@@ -2714,6 +2714,7 @@ sub mysql_server_start($) {
# Copy datadir from installed system db
my $path= ($opt_parallel == 1) ? "$opt_vardir" : "$opt_vardir/..";
my $install_db= "$path/install.db";
+ mtr_verbose("copying $install_db to $datadir");
copytree($install_db, $datadir) if -d $install_db;
mtr_error("Failed to copy system db to '$datadir'") unless -d $datadir;
}
diff --git a/mysql-test/std_data/bug47142_master-bin.000001 b/mysql-test/std_data/bug47142_master-bin.000001
deleted file mode 100644
index d1a089a784a..00000000000
--- a/mysql-test/std_data/bug47142_master-bin.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/std_data/master-bin.000001 b/mysql-test/std_data/master-bin.000001
deleted file mode 100644
index 2ec2397acdd..00000000000
--- a/mysql-test/std_data/master-bin.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/std_data/trunc_binlog.000001 b/mysql-test/std_data/trunc_binlog.000001
deleted file mode 100644
index 3da2490eab2..00000000000
--- a/mysql-test/std_data/trunc_binlog.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/archive/archive.result b/mysql-test/suite/archive/archive.result
index 022b400fd97..0a6ec04fb31 100644
--- a/mysql-test/suite/archive/archive.result
+++ b/mysql-test/suite/archive/archive.result
@@ -12488,6 +12488,10 @@ SELECT b FROM t5 WHERE a =3;
b
in order to form a more pefect union
foo this is mine to think about
+explain
+SELECT b FROM t5 WHERE a IN (32, 23, 5);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t5 ALL a NULL NULL NULL 11 Using where
SELECT b FROM t5 WHERE a IN (32, 23, 5);
b
foo grok
diff --git a/mysql-test/suite/archive/archive.test b/mysql-test/suite/archive/archive.test
index b6920827005..df5f6503321 100644
--- a/mysql-test/suite/archive/archive.test
+++ b/mysql-test/suite/archive/archive.test
@@ -1445,8 +1445,9 @@ INSERT INTO t5 VALUES (NULL, "promote the general welfare");
SELECT * FROM t5;
SELECT b FROM t5;
SELECT b FROM t5 WHERE a =3;
+explain
+SELECT b FROM t5 WHERE a IN (32, 23, 5);
SELECT b FROM t5 WHERE a IN (32, 23, 5);
-
#More blob tests
diff --git a/mysql-test/suite/binlog/r/binlog_base64_flag.result b/mysql-test/suite/binlog/r/binlog_base64_flag.result
index e325feb508b..4b75b712aee 100644
--- a/mysql-test/suite/binlog/r/binlog_base64_flag.result
+++ b/mysql-test/suite/binlog/r/binlog_base64_flag.result
@@ -1,7 +1,7 @@
call mtr.add_suppression("BINLOG_BASE64_EVENT: According to the master's version");
call mtr.add_suppression("BINLOG_BASE64_EVENT: Column 1 of table 'test.char128_utf8' cannot be converted");
-DROP TABLE IF EXISTS t1;
-==== Test BUG#32407 ====
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (1), (1);
select * from t1;
a
1
@@ -49,35 +49,6 @@ a
SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL','NULL';
@binlog_fragment_0 NULL NULL
NULL NULL NULL
-==== Test --base64-output=never on a binlog with row events ====
-/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
-/*!40019 SET @@session.max_delayed_threads=0*/;
-/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
-DELIMITER /*!*/;
-<#>
-ROLLBACK/*!*/;
-<#>
-use `test`/*!*/;
-SET TIMESTAMP=1196959712/*!*/;
-<#>SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.system_versioning_insert_history=0/*!*/;
-SET @@session.sql_mode=0/*!*/;
-SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
-/*!\C latin1 *//*!*/;
-SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/;
-SET @@session.lc_time_names=0/*!*/;
-SET @@session.collation_database=DEFAULT/*!*/;
-create table t1 (a int) engine= myisam
-/*!*/;
-<#>
-<#>
-<#>
-<#>
-<#>
-DELIMITER ;
-# End of log file
-ROLLBACK /* added by mysqlbinlog */;
-/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
-/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
==== Test non-matching FD event and Row event ====
BINLOG '
4CdYRw8BAAAAYgAAAGYAAAAAAAQANS4xLjE1LW5kYi02LjEuMjQtZGVidWctbG9nAAAAAAAAAAAA
diff --git a/mysql-test/suite/binlog/r/binlog_grant.result b/mysql-test/suite/binlog/r/binlog_grant.result
index edf705614eb..76cc415d568 100644
--- a/mysql-test/suite/binlog/r/binlog_grant.result
+++ b/mysql-test/suite/binlog/r/binlog_grant.result
@@ -16,7 +16,7 @@ set session sql_log_bin = 1;
connection plain;
[plain]
set session sql_log_bin = 1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
**** Variable BINLOG_FORMAT ****
connection root;
[root]
@@ -25,9 +25,9 @@ set session binlog_format = row;
connection plain;
[plain]
set global binlog_format = row;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
set session binlog_format = row;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
**** Clean up ****
disconnect plain;
disconnect root;
@@ -45,7 +45,10 @@ disconnect rpl;
connection default;
DROP USER 'mysqltest_1'@'localhost';
#
-# Start of 10.5 test
+# End of 10.4 tests
+#
+#
+# Start of 10.5 tests
#
#
# MDEV-21743 Split up SUPER privilege to smaller privileges
@@ -61,29 +64,29 @@ SHOW GRANTS FOR user1@localhost;
Grants for user1@localhost
GRANT USAGE ON *.* TO `user1`@`localhost`
DROP USER user1@localhost;
-# Test if SHOW BINARY LOGS and SHOW BINGLOG STATUS are not allowed without REPLICATION CLIENT or SUPER
+# Test if SHOW BINARY LOGS and SHOW BINGLOG STATUS are not allowed without REPLICATION CLIENT
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION CLIENT, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION CLIENT ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SHOW MASTER LOGS;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG MONITOR privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG MONITOR privilege(s) for this operation
SHOW BINARY LOGS;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG MONITOR privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG MONITOR privilege(s) for this operation
SHOW BINLOG STATUS;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG MONITOR privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG MONITOR privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test if PURGE BINARY LOGS is not allowed without BINLOG ADMIN or SUPER
+# Test if PURGE BINARY LOGS is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
PURGE BINARY LOGS BEFORE '2001-01-01 00:00:00';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -96,15 +99,6 @@ PURGE BINARY LOGS BEFORE '2001-01-01 00:00:00';
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test if PURGE BINLOG is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,"*NO-ONE*";
-connection user1;
-PURGE BINARY LOGS BEFORE '2001-01-01 00:00:00';
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
# Test if SHOW BINLOG EVENTS is not allowed without BINLOG MONITOR
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
@@ -142,7 +136,7 @@ connect user1,localhost,user1,,;
RENAME TABLE t1 to t2;
connection default;
REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
-call mtr.add_suppression("Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation");
+call mtr.add_suppression("Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation");
# Privilege errors are expected now:
connection user1;
connection default;
diff --git a/mysql-test/suite/binlog/r/binlog_old_versions.result b/mysql-test/suite/binlog/r/binlog_old_versions.result
deleted file mode 100644
index 30b64535eb4..00000000000
--- a/mysql-test/suite/binlog/r/binlog_old_versions.result
+++ /dev/null
@@ -1,70 +0,0 @@
-==== Read binlog with v2 row events ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-62046 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
-==== Read modern binlog (version 5.1.23) ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-674568 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
-==== Read binlog from version 5.1.17 ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-764247 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
-==== Read binlog from version 4.1 ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-4 four
-190243 random
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t3;
-==== Read binlog from telco tree (mysql-5.1-telco-6.1) ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-703356 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
diff --git a/mysql-test/suite/binlog/r/binlog_unsafe.result b/mysql-test/suite/binlog/r/binlog_unsafe.result
index 0c0b0e77915..67849fc7fd1 100644
--- a/mysql-test/suite/binlog/r/binlog_unsafe.result
+++ b/mysql-test/suite/binlog/r/binlog_unsafe.result
@@ -2338,11 +2338,7 @@ Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave
UPDATE t1 SET a=1 LIMIT 1;
-Warnings:
-Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted
DELETE FROM t1 LIMIT 1;
-Warnings:
-Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted
CREATE PROCEDURE p1()
BEGIN
INSERT INTO t1 SELECT * FROM t1 LIMIT 1;
diff --git a/mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001 b/mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001
deleted file mode 100644
index 66db9668d46..00000000000
--- a/mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/bug32407.001 b/mysql-test/suite/binlog/std_data/bug32407.001
deleted file mode 100644
index c73243707ef..00000000000
--- a/mysql-test/suite/binlog/std_data/bug32407.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_5_1-telco.001 b/mysql-test/suite/binlog/std_data/ver_5_1-telco.001
deleted file mode 100644
index 76856cb04a2..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_5_1-telco.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_5_1_17.001 b/mysql-test/suite/binlog/std_data/ver_5_1_17.001
deleted file mode 100644
index 9b6e200e492..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_5_1_17.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_5_1_23.001 b/mysql-test/suite/binlog/std_data/ver_5_1_23.001
deleted file mode 100644
index 0e9a9d1470a..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_5_1_23.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001 b/mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001
deleted file mode 100644
index 28360beca68..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/t/binlog_base64_flag.test b/mysql-test/suite/binlog/t/binlog_base64_flag.test
index 5311da54f5f..6935f69ba36 100644
--- a/mysql-test/suite/binlog/t/binlog_base64_flag.test
+++ b/mysql-test/suite/binlog/t/binlog_base64_flag.test
@@ -2,9 +2,6 @@
# work as expected, and that BINLOG statements with row events fail if
# they are not preceded by BINLOG statements with Format description
# events.
-#
-# See also BUG#32407.
-
# BINLOG statement does not work in embedded mode.
source include/not_embedded.inc;
@@ -12,23 +9,10 @@ source include/not_embedded.inc;
call mtr.add_suppression("BINLOG_BASE64_EVENT: According to the master's version");
call mtr.add_suppression("BINLOG_BASE64_EVENT: Column 1 of table 'test.char128_utf8' cannot be converted");
-disable_warnings;
-DROP TABLE IF EXISTS t1;
-enable_warnings;
-# Test to show BUG#32407. This reads a binlog created with the
-# mysql-5.1-telco-6.1 tree, specifically at the tag
-# mysql-5.1.15-ndb-6.1.23, and applies it to the database. The test
-# should fail before BUG#32407 was fixed and succeed afterwards.
---echo ==== Test BUG#32407 ====
-
-# The binlog contains row events equivalent to:
-# CREATE TABLE t1 (a int) engine = myisam
-# INSERT INTO t1 VALUES (1), (1)
-exec $MYSQL_BINLOG suite/binlog/std_data/bug32407.001 | $MYSQL;
-# The above line should succeed and t1 should contain two ones
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (1), (1);
select * from t1;
-
# Test that a BINLOG statement encoding a row event fails unless a
# Format_description_event as been supplied with an earlier BINLOG
# statement.
@@ -92,14 +76,6 @@ select * from t1;
# show "one-shot" feature of binlog_fragment variables
SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL','NULL';
-# New mysqlbinlog supports --base64-output=never
---echo ==== Test --base64-output=never on a binlog with row events ====
-
-# mysqlbinlog should fail
---replace_regex /#[0-9][0-9][0-9][0-9][0-9][0-9] \N*/<#>/ /SET \@\@session.pseudo_thread_id.*/<#>/
-exec $MYSQL_BINLOG --base64-output=never --print-row-count=0 --print-row-event-positions=0 suite/binlog/std_data/bug32407.001;
-
-
# Test that the following fails cleanly: "First, read a
# Format_description event which has N event types. Then, read an
# event of type M>N"
diff --git a/mysql-test/suite/binlog/t/binlog_expire_warnings.opt b/mysql-test/suite/binlog/t/binlog_expire_warnings.opt
new file mode 100644
index 00000000000..c85ef7d3e04
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_expire_warnings.opt
@@ -0,0 +1 @@
+--log-warnings=4
diff --git a/mysql-test/suite/binlog/t/binlog_grant.test b/mysql-test/suite/binlog/t/binlog_grant.test
index d573281f691..5ece793e4a3 100644
--- a/mysql-test/suite/binlog/t/binlog_grant.test
+++ b/mysql-test/suite/binlog/t/binlog_grant.test
@@ -30,7 +30,6 @@ connection plain;
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
set session sql_log_bin = 1;
-
# Testing setting both session and global BINLOG_FORMAT variable both
# as root and as plain user.
@@ -56,7 +55,6 @@ connection default;
set global binlog_format = @saved_binlog_format;
drop user mysqltest_1@localhost;
-
# Testing if REPLICATION CLIENT privilege is enough to execute
# SHOW MASTER LOGS and SHOW BINARY.
CREATE USER 'mysqltest_1'@'localhost';
@@ -77,9 +75,12 @@ SHOW BINLOG STATUS;
connection default;
DROP USER 'mysqltest_1'@'localhost';
+--echo #
+--echo # End of 10.4 tests
+--echo #
--echo #
---echo # Start of 10.5 test
+--echo # Start of 10.5 tests
--echo #
--echo #
@@ -95,11 +96,10 @@ REVOKE REPLICATION CLIENT ON *.* FROM user1@localhost;
SHOW GRANTS FOR user1@localhost;
DROP USER user1@localhost;
-
---echo # Test if SHOW BINARY LOGS and SHOW BINGLOG STATUS are not allowed without REPLICATION CLIENT or SUPER
+--echo # Test if SHOW BINARY LOGS and SHOW BINLOG STATUS are not allowed without REPLICATION CLIENT
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION CLIENT, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION CLIENT ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -112,11 +112,10 @@ SHOW BINLOG STATUS;
--connection default
DROP USER user1@localhost;
-
---echo # Test if PURGE BINARY LOGS is not allowed without BINLOG ADMIN or SUPER
+--echo # Test if PURGE BINARY LOGS is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -125,7 +124,6 @@ PURGE BINARY LOGS BEFORE '2001-01-01 00:00:00';
--connection default
DROP USER user1@localhost;
-
--echo # Test if PURGE BINLOG is allowed with BINLOG ADMIN
CREATE USER user1@localhost;
GRANT BINLOG ADMIN ON *.* TO user1@localhost;
@@ -136,18 +134,6 @@ PURGE BINARY LOGS BEFORE '2001-01-01 00:00:00';
connection default;
DROP USER user1@localhost;
-
---echo # Test if PURGE BINLOG is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,"*NO-ONE*")
---connection user1
-PURGE BINARY LOGS BEFORE '2001-01-01 00:00:00';
---disconnect user1
-connection default;
-DROP USER user1@localhost;
-
-
--echo # Test if SHOW BINLOG EVENTS is not allowed without BINLOG MONITOR
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
@@ -160,7 +146,6 @@ SHOW BINLOG EVENTS;
--connection default
DROP USER user1@localhost;
-
--echo # Test if SHOW BINLOG EVENTS is allowed with BINLOG MONITOR
CREATE USER user1@localhost;
GRANT BINLOG MONITOR ON *.* TO user1@localhost;
@@ -196,7 +181,7 @@ RENAME TABLE t1 to t2;
--connection default
REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
-call mtr.add_suppression("Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation");
+call mtr.add_suppression("Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation");
--echo # Privilege errors are expected now:
--connection user1
--error 1
diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test
index f95fc0137a2..8ec3856dcb5 100644
--- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test
+++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test
@@ -26,7 +26,7 @@ FLUSH LOGS;
INSERT INTO t1 VALUES (1);
# Read binlog data from master to intermediary result file
---let TIMEOUT=1
+--let TIMEOUT=5
--echo # timeout TIMEOUT MYSQL_BINLOG --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=MASTER_MYPORT --stop-never --result-file=MYSQLTEST_VARDIR/tmp/ master-bin.000001
--error 124 # Error 124 means timeout was reached
--exec timeout $TIMEOUT $MYSQL_BINLOG --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --stop-never --result-file=$MYSQLTEST_VARDIR/tmp/ master-bin.000001
diff --git a/mysql-test/suite/binlog/t/binlog_old_versions.test b/mysql-test/suite/binlog/t/binlog_old_versions.test
deleted file mode 100644
index 130101541e3..00000000000
--- a/mysql-test/suite/binlog/t/binlog_old_versions.test
+++ /dev/null
@@ -1,153 +0,0 @@
-# Test that old binlog formats can be read.
-
-# Some previous versions of MySQL use their own binlog format,
-# especially in row-based replication. This test uses saved binlogs
-# from those old versions to test that we can replicate from old
-# versions to the present version.
-
-# Replicating from old versions to new versions is necessary in an
-# online upgrade scenario, where the .
-
-# The previous versions we currently test are:
-# - version 5.1.17 and earlier trees
-# - mysql-5.1-wl2325-xxx trees (AKA alcatel trees)
-# - mysql-5.1-telco-6.1 trees
-# For completeness, we also test mysql-5.1-new_rpl, which is supposed
-# to be the "correct" version.
-
-# All binlogs were generated with the same commands (listed at the end
-# of this test for reference). The binlogs contain the following
-# events: Table_map, Write_rows, Update_rows, Delete_rows Query, Xid,
-# User_var, Int_var, Rand, Begin_load, Append_file, Execute_load.
-
-# Related bugs: BUG#27779, BUG#31581, BUG#31582, BUG#31583, BUG#32407
-
-source include/not_embedded.inc;
-
---echo ==== Read binlog with v2 row events ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_trunk_row_v2.001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
---echo ==== Read modern binlog (version 5.1.23) ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1_23.001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
---echo ==== Read binlog from version 5.1.17 ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1_17.001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
---echo ==== Read binlog from version 4.1 ====
-
-# In this version, neither row-based binlogging nor Xid events
-# existed, so the binlog was generated without the "row-based tests"
-# part and the "get xid event" part, and it does not create table t2.
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/binlog_old_version_4_1.000001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t3;
-
-
---echo ==== Read binlog from telco tree (mysql-5.1-telco-6.1) ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1-telco.001 | $MYSQL --local-infile=1
-# Show resulting tablea.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
-#### The following commands were used to generate the binlogs ####
-#
-#source include/master-slave.inc;
-#
-## ==== initialize ====
-#USE test;
-#CREATE TABLE t1 (a int, b char(50)) ENGINE = MyISAM;
-#CREATE TABLE t2 (a int, b char(50)) ENGINE = InnoDB;
-#CREATE TABLE t3 (a char(20));
-#
-#
-## ==== row based tests ====
-#SET BINLOG_FORMAT='row';
-#
-## ---- get write, update, and delete rows events ----
-#INSERT INTO t1 VALUES (0, 'one'), (1, 'two');
-#UPDATE t1 SET a=a+1;
-#DELETE FROM t1 WHERE a=2;
-#
-#
-## ==== statement based tests ====
-#SET BINLOG_FORMAT = 'statement';
-#
-## ---- get xid events ----
-#BEGIN;
-#INSERT INTO t2 VALUES (3, 'first stm in trx');
-#INSERT INTO t1 VALUES (3, 'last stm in trx: next event should be xid');
-#COMMIT;
-#
-## ---- get user var events ----
-#SET @x = 4;
-#INSERT INTO t1 VALUES (@x, 'four');
-#
-## ---- get rand event ----
-#INSERT INTO t1 VALUES (RAND() * 1000000, 'random');
-#
-## ---- get intvar event ----
-#INSERT INTO t1 VALUES (LAST_INSERT_ID(), 'last_insert_id');
-#
-## ---- get begin, append and execute load events ----
-## double the file until we have more than 2^17 bytes, so that the
-## event has to be split and we can use Append_file_log_event.
-#
-#SET SQL_LOG_BIN=0;
-#CREATE TABLE temp (a char(20));
-#LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#SELECT a FROM temp INTO OUTFILE 'big_file.dat';
-#DROP TABLE temp;
-#SET SQL_LOG_BIN=1;
-#
-#LOAD DATA INFILE 'big_file.dat' INTO TABLE t3;
-#
-#SELECT * FROM t1 ORDER BY a;
-#SELECT * FROM t2 ORDER BY a;
-#SELECT COUNT(*) FROM t3;
-#--source include/rpl_end.inc
diff --git a/mysql-test/suite/binlog/t/binlog_truncate_multi_engine.test b/mysql-test/suite/binlog/t/binlog_truncate_multi_engine.test
index 12b0a743916..61d097a8af7 100644
--- a/mysql-test/suite/binlog/t/binlog_truncate_multi_engine.test
+++ b/mysql-test/suite/binlog/t/binlog_truncate_multi_engine.test
@@ -12,6 +12,7 @@
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_row.inc
+--source include/not_valgrind.inc
--let $old_max_binlog_size= `select @@global.max_binlog_size`
call mtr.add_suppression("Can.t init tc log");
diff --git a/mysql-test/suite/binlog_encryption/rpl_skip_replication.result b/mysql-test/suite/binlog_encryption/rpl_skip_replication.result
index 96e0a30331d..c17ffbb5e47 100644
--- a/mysql-test/suite/binlog_encryption/rpl_skip_replication.result
+++ b/mysql-test/suite/binlog_encryption/rpl_skip_replication.result
@@ -12,7 +12,7 @@ SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1';
connect nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,;
connection nonpriv;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
disconnect nonpriv;
connection slave;
DROP USER'nonsuperuser'@'127.0.0.1';
diff --git a/mysql-test/suite/binlog_encryption/rpl_sync-master.opt b/mysql-test/suite/binlog_encryption/rpl_sync-master.opt
index 04b06bfa0f2..96f0ce3f36c 100644
--- a/mysql-test/suite/binlog_encryption/rpl_sync-master.opt
+++ b/mysql-test/suite/binlog_encryption/rpl_sync-master.opt
@@ -1,2 +1 @@
--default-storage-engine=MyISAM
---loose-innodb-file-per-table=0
diff --git a/mysql-test/suite/binlog_encryption/rpl_sync-slave.opt b/mysql-test/suite/binlog_encryption/rpl_sync-slave.opt
index 795330535c1..d1a481cd37d 100644
--- a/mysql-test/suite/binlog_encryption/rpl_sync-slave.opt
+++ b/mysql-test/suite/binlog_encryption/rpl_sync-slave.opt
@@ -1,2 +1,2 @@
---sync-relay-log-info=1 --relay-log-recovery=1 --default-storage-engine=MyISAM --loose-innodb-file-per-table=0
+--sync-relay-log-info=1 --relay-log-recovery=1 --default-storage-engine=MyISAM
--skip-core-file --skip-slave-start
diff --git a/mysql-test/suite/compat/oracle/r/sp-package-innodb.result b/mysql-test/suite/compat/oracle/r/sp-package-innodb.result
index 0ac357df5da..50eb2dc6cd0 100644
--- a/mysql-test/suite/compat/oracle/r/sp-package-innodb.result
+++ b/mysql-test/suite/compat/oracle/r/sp-package-innodb.result
@@ -23,8 +23,6 @@ a:=a+1;
INSERT INTO t1 VALUES (a,'pkg1 initialization');
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL pkg1.p1;
SELECT * FROM t1 ORDER BY a;
a routine
diff --git a/mysql-test/suite/compat/oracle/r/sp-package.result b/mysql-test/suite/compat/oracle/r/sp-package.result
index ef0acea5da1..ef4b3f5db1f 100644
--- a/mysql-test/suite/compat/oracle/r/sp-package.result
+++ b/mysql-test/suite/compat/oracle/r/sp-package.result
@@ -2062,8 +2062,6 @@ $$
CALL p1.p1();
@a
11
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1();
@a
12
@@ -2095,8 +2093,6 @@ BEGIN
SELECT MAX(a) FROM t1 INTO @a;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1();
@a
11
@@ -2130,8 +2126,6 @@ BEGIN
SELECT 1 FROM t1 INTO @a;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1();
ERROR 42S02: Table 'test.t1' doesn't exist
SELECT p1.f1();
@@ -2690,9 +2684,6 @@ SELECT * FROM t1 INTO b;
SELECT b.a, b.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1;
b.a b.b
10 b
diff --git a/mysql-test/suite/compat/oracle/r/sp-row.result b/mysql-test/suite/compat/oracle/r/sp-row.result
index 0b23f303030..b3a0ae15711 100644
--- a/mysql-test/suite/compat/oracle/r/sp-row.result
+++ b/mysql-test/suite/compat/oracle/r/sp-row.result
@@ -2835,8 +2835,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2851,8 +2849,6 @@ SELECT * FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2867,8 +2863,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
@@ -2884,8 +2878,6 @@ SELECT 10,'a','b' FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2900,8 +2892,6 @@ SELECT 10,'a' FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2916,8 +2906,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
@@ -2934,8 +2922,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2951,8 +2937,6 @@ SELECT * FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2968,8 +2952,6 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/suite/compat/oracle/r/table_value_constr.result b/mysql-test/suite/compat/oracle/r/table_value_constr.result
index af071433d0f..65e31761b90 100644
--- a/mysql-test/suite/compat/oracle/r/table_value_constr.result
+++ b/mysql-test/suite/compat/oracle/r/table_value_constr.result
@@ -565,12 +565,12 @@ where t1.a=t2.a and st<3
select * from t2;
a b st
1 1 1
-1 2 2
1 1 2
-1 2 3
-1 2 3
1 1 3
1 1 3
+1 2 2
+1 2 3
+1 2 3
# recursive CTE that uses VALUES structure(s) : computation of factorial (first 10 elements)
with recursive fact(n,f) as
(
@@ -741,21 +741,19 @@ a b
explain extended select * from t1
where a in (values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1)) "tvc_0") where 1
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from (values (1)) "tvc_0" join "test"."t1" where "tvc_0"."1" = "test"."t1"."a"
explain extended select * from t1
where a in (select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1)) "tvc_0") where 1
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from (values (1)) "tvc_0" join "test"."t1" where "tvc_0"."1" = "test"."t1"."a"
# IN-subquery with VALUES structure(s) : UNION with VALUES on the first place
select * from t1
where a in (values (1) union select 2);
@@ -774,7 +772,7 @@ explain extended select * from t1
where a in (values (1) union select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union4,3> ALL NULL NULL NULL NULL NULL NULL
@@ -785,7 +783,7 @@ where a in (select * from (values (1)) as tvc_0 union
select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 4 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
@@ -810,7 +808,7 @@ where a in (select 2 union values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-4 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+4 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -821,7 +819,7 @@ select * from (values (1)) tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-3 DEPENDENT UNION <derived4> ref key0 key0 4 func 2 100.00
+3 DEPENDENT UNION <derived4> ref key0 key0 4 func 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -846,7 +844,7 @@ explain extended select * from t1
where a in (values (1) union all select b from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DEPENDENT UNION t1 ALL NULL NULL NULL NULL 6 100.00 Using where
Warnings:
@@ -856,7 +854,7 @@ where a in (select * from (values (1)) as tvc_0 union all
select b from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 4 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
4 DEPENDENT UNION t1 ALL NULL NULL NULL NULL 6 100.00 Using where
Warnings:
@@ -878,18 +876,18 @@ explain extended select * from t1
where a not in (values (1),(2));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
+3 DEPENDENT SUBQUERY <derived2> unique_subquery distinct_key distinct_key 4 func 1 100.00 Using where; Full scan on NULL key
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<expr_cache><"test"."t1"."a">(<in_optimizer>("test"."t1"."a","test"."t1"."a" in ( <materialize> (/* select#3 */ select "tvc_0"."1" from (values (1),(2)) "tvc_0" ), <primary_index_lookup>("test"."t1"."a" in <temporary table> on distinct_key where "test"."t1"."a" = "<subquery3>"."1"))))
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<expr_cache><"test"."t1"."a">(<in_optimizer>("test"."t1"."a",<exists>(<primary_index_lookup>(<cache>("test"."t1"."a") in <temporary table> on distinct_key where trigcond(<cache>("test"."t1"."a") = "tvc_0"."1")))))
explain extended select * from t1
where a not in (select * from (values (1),(2)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+2 DEPENDENT SUBQUERY <derived3> unique_subquery distinct_key distinct_key 4 func 1 100.00 Using where; Full scan on NULL key
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<expr_cache><"test"."t1"."a">(<in_optimizer>("test"."t1"."a","test"."t1"."a" in ( <materialize> (/* select#2 */ select "tvc_0"."1" from (values (1),(2)) "tvc_0" ), <primary_index_lookup>("test"."t1"."a" in <temporary table> on distinct_key where "test"."t1"."a" = "<subquery2>"."1"))))
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" where !<expr_cache><"test"."t1"."a">(<in_optimizer>("test"."t1"."a",<exists>(<primary_index_lookup>(<cache>("test"."t1"."a") in <temporary table> on distinct_key where trigcond(<cache>("test"."t1"."a") = "tvc_0"."1")))))
# NOT IN subquery with VALUES structure(s) : UNION with VALUES on the first place
select * from t1
where a not in (values (1) union select 2);
@@ -976,21 +974,19 @@ a b
explain extended select * from t1
where a = any (values (1),(2));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 100.00
-3 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1),(2)) "tvc_0") where 1
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from (values (1),(2)) "tvc_0" join "test"."t1" where "tvc_0"."1" = "test"."t1"."a"
explain extended select * from t1
where a = any (select * from (values (1),(2)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
-2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
+1 PRIMARY <derived3> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from "test"."t1" semi join ((values (1),(2)) "tvc_0") where 1
+Note 1003 /* select#1 */ select "test"."t1"."a" AS "a","test"."t1"."b" AS "b" from (values (1),(2)) "tvc_0" join "test"."t1" where "tvc_0"."1" = "test"."t1"."a"
# ANY-subquery with VALUES structure(s) : UNION with VALUES on the first place
select * from t1
where a = any (values (1) union select 2);
@@ -1009,7 +1005,7 @@ explain extended select * from t1
where a = any (values (1) union select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-4 DEPENDENT SUBQUERY <derived2> ref key0 key0 4 func 2 100.00
+4 DEPENDENT SUBQUERY <derived2> eq_ref distinct_key distinct_key 4 func 1 100.00
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union4,3> ALL NULL NULL NULL NULL NULL NULL
@@ -1020,7 +1016,7 @@ where a = any (select * from (values (1)) as tvc_0 union
select 2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
-2 DEPENDENT SUBQUERY <derived3> ref key0 key0 4 func 2 100.00
+2 DEPENDENT SUBQUERY <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
4 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
@@ -1045,7 +1041,7 @@ where a = any (select 2 union values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-4 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+4 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -1056,7 +1052,7 @@ select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-3 DEPENDENT UNION <derived4> ref key0 key0 4 func 2 100.00
+3 DEPENDENT UNION <derived4> ref key0 key0 4 func 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -1138,7 +1134,7 @@ where a = any (select 1 union values (1));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-4 DEPENDENT UNION <derived3> ref key0 key0 4 func 2 100.00
+4 DEPENDENT UNION <derived3> eq_ref distinct_key distinct_key 4 func 1 100.00
3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
@@ -1149,7 +1145,7 @@ select * from (values (1)) as tvc_0);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 6 100.00 Using where
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-3 DEPENDENT UNION <derived4> ref key0 key0 4 func 2 100.00
+3 DEPENDENT UNION <derived4> ref key0 key0 4 func 1 100.00
4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
diff --git a/mysql-test/suite/compat/oracle/r/update_innodb.result b/mysql-test/suite/compat/oracle/r/update_innodb.result
index 1dae643eeff..0c9922fe19e 100644
--- a/mysql-test/suite/compat/oracle/r/update_innodb.result
+++ b/mysql-test/suite/compat/oracle/r/update_innodb.result
@@ -6,8 +6,6 @@ CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) engine=innodb;
INSERT INTO t1 VALUES (1);
START TRANSACTION;
SELECT a AS a_con1 FROM t1 INTO @a FOR UPDATE;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connect con2,localhost,root,,;
SET sql_mode='ORACLE';
START TRANSACTION;
@@ -16,8 +14,6 @@ connection default;
UPDATE t1 SET a=a+100;
COMMIT;
connection con2;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a AS con2 FROM t1;
con2
101
diff --git a/mysql-test/suite/compat/oracle/t/table_value_constr.test b/mysql-test/suite/compat/oracle/t/table_value_constr.test
index ca3c40bb7f9..7c1463e27af 100644
--- a/mysql-test/suite/compat/oracle/t/table_value_constr.test
+++ b/mysql-test/suite/compat/oracle/t/table_value_constr.test
@@ -424,6 +424,7 @@ select * from t2;
--echo # recursive CTE that uses VALUES structure(s) : that uses UNION ALL
+--sorted_result
with recursive t2(a,b,st) as
(
values(1,1,1)
diff --git a/mysql-test/suite/encryption/r/encrypt_and_grep.result b/mysql-test/suite/encryption/r/encrypt_and_grep.result
index 72d612eeac8..e52ec0e453f 100644
--- a/mysql-test/suite/encryption/r/encrypt_and_grep.result
+++ b/mysql-test/suite/encryption/r/encrypt_and_grep.result
@@ -1,4 +1,3 @@
-SET GLOBAL innodb_file_per_table = ON;
create table t1 (a varchar(255)) engine=innodb encrypted=yes;
create table t2 (a varchar(255)) engine=innodb;
show warnings;
diff --git a/mysql-test/suite/encryption/r/innochecksum.result b/mysql-test/suite/encryption/r/innochecksum.result
index 7c68164e52a..dc0bf63a71e 100644
--- a/mysql-test/suite/encryption/r/innochecksum.result
+++ b/mysql-test/suite/encryption/r/innochecksum.result
@@ -1,4 +1,3 @@
-SET GLOBAL innodb_file_per_table = ON;
set global innodb_compression_algorithm = 1;
# Create and populate a tables
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change.result b/mysql-test/suite/encryption/r/innodb-bad-key-change.result
index e2034f14e31..eb114fcf6fc 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change.result
@@ -9,7 +9,6 @@ call mtr.add_suppression("File '.*mysql-test.std_data.keysbad3\\.txt' not found"
call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space=");
# Start server with keys2.txt
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
-SET GLOBAL innodb_file_per_table = ON;
CREATE TABLE t1 (c VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=2;
INSERT INTO t1 VALUES ('foobar');
ALTER TABLE t1 ADD COLUMN c2 INT;
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change2.result b/mysql-test/suite/encryption/r/innodb-bad-key-change2.result
index 3cda2bd537b..ab67b6fedad 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change2.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change2.result
@@ -8,7 +8,6 @@ call mtr.add_suppression("InnoDB: Cannot delete tablespace .* because it is not
call mtr.add_suppression("InnoDB: ALTER TABLE `test`\\.`t1` DISCARD TABLESPACE failed to find tablespace");
call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space=");
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
-SET GLOBAL innodb_file_per_table = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change4.result b/mysql-test/suite/encryption/r/innodb-bad-key-change4.result
index e808d50b544..9983c26c9d7 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change4.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change4.result
@@ -5,7 +5,6 @@ call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
call mtr.add_suppression("Table .*t1.* is corrupted");
call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space=");
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
-SET GLOBAL innodb_file_per_table = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
diff --git a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
index 7b97eb6b5bd..f88f18d0928 100644
--- a/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
+++ b/mysql-test/suite/encryption/r/innodb-checksum-algorithm.result
@@ -1,8 +1,6 @@
-SET @saved_file_per_table = @@global.innodb_file_per_table;
SET @saved_encrypt_tables = @@global.innodb_encrypt_tables;
SET @saved_encryption_threads = @@global.innodb_encryption_threads;
SET @saved_encryption_key_id = @@global.innodb_default_encryption_key_id;
-SET GLOBAL innodb_file_per_table = ON;
SET GLOBAL innodb_encrypt_tables = ON;
SET GLOBAL innodb_encryption_threads = 4;
SET GLOBAL innodb_default_encryption_key_id=4;
@@ -92,7 +90,6 @@ test.t check status OK
test.tpe check status OK
test.tp check status OK
DROP TABLE tce, tc, te, t, tpe, tp;
-SET GLOBAL innodb_file_per_table = @saved_file_per_table;
SET GLOBAL innodb_encrypt_tables = @saved_encrypt_tables;
SET GLOBAL innodb_encryption_threads = @saved_encryption_threads;
SET GLOBAL innodb_default_encryption_key_id = @saved_encryption_key_id;
diff --git a/mysql-test/suite/encryption/r/innodb-compressed-blob.result b/mysql-test/suite/encryption/r/innodb-compressed-blob.result
index 0dc873b99ab..bb87d171601 100644
--- a/mysql-test/suite/encryption/r/innodb-compressed-blob.result
+++ b/mysql-test/suite/encryption/r/innodb-compressed-blob.result
@@ -4,7 +4,6 @@ call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[pag
call mtr.add_suppression("InnoDB: Table `test`\\.`t[12]` is corrupted");
# Restart mysqld --file-key-management-filename=keys2.txt
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
-SET GLOBAL innodb_file_per_table = ON;
set GLOBAL innodb_default_encryption_key_id=4;
create table t1(a int not null primary key, b blob, index(b(10))) engine=innodb row_format=compressed;
create table t2(a int not null primary key, b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes;
diff --git a/mysql-test/suite/encryption/r/innodb-force-corrupt.result b/mysql-test/suite/encryption/r/innodb-force-corrupt.result
index ad75df952b3..219dbd7cfc3 100644
--- a/mysql-test/suite/encryption/r/innodb-force-corrupt.result
+++ b/mysql-test/suite/encryption/r/innodb-force-corrupt.result
@@ -2,7 +2,6 @@ call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption in an InnoDB type table");
call mtr.add_suppression("\\[ERROR\\] (mysqld|mariadbd).*: Index for table 't2' is corrupt; try to repair it");
-SET GLOBAL innodb_file_per_table = ON;
set global innodb_compression_algorithm = 1;
# Create and populate tables to be corrupted
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT,c char(200)) ENGINE=InnoDB encrypted=yes;
diff --git a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
index 4e816bea43b..4eb203d2787 100644
--- a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
+++ b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
@@ -1,4 +1,3 @@
-SET GLOBAL innodb_file_per_table = ON;
set global innodb_compression_algorithm = 1;
create database enctests;
use enctests;
diff --git a/mysql-test/suite/encryption/r/innodb-redo-badkey.result b/mysql-test/suite/encryption/r/innodb-redo-badkey.result
index 34fd043a7bd..6b8f6aa11da 100644
--- a/mysql-test/suite/encryption/r/innodb-redo-badkey.result
+++ b/mysql-test/suite/encryption/r/innodb-redo-badkey.result
@@ -14,7 +14,6 @@ call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE faile
call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space=");
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
# Wait max 10 min for key encryption threads to encrypt all spaces
-SET GLOBAL innodb_file_per_table = ON;
create table t1(a int not null primary key auto_increment, c char(250), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes encryption_key_id=4;
create table t2(a int not null primary key auto_increment, c char(250), b blob, index(b(10))) engine=innodb row_format=compressed;
create table t3(a int not null primary key auto_increment, c char(250), b blob, index(b(10))) engine=innodb encrypted=yes encryption_key_id=4;
diff --git a/mysql-test/suite/encryption/r/innodb-redo-nokeys.result b/mysql-test/suite/encryption/r/innodb-redo-nokeys.result
index 859a73db056..6482a5d960d 100644
--- a/mysql-test/suite/encryption/r/innodb-redo-nokeys.result
+++ b/mysql-test/suite/encryption/r/innodb-redo-nokeys.result
@@ -12,7 +12,6 @@ call mtr.add_suppression("InnoDB: Missing FILE_CHECKPOINT");
call mtr.add_suppression("InnoDB: Log scan aborted at LSN");
call mtr.add_suppression("InnoDB: Set innodb_force_recovery=1 to ignore corruption");
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
-SET GLOBAL innodb_file_per_table = ON;
create table t1(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes encryption_key_id=20;
create table t2(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes;
create table t3(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb encrypted=yes encryption_key_id=20;
diff --git a/mysql-test/suite/encryption/r/innodb-remove-encryption.result b/mysql-test/suite/encryption/r/innodb-remove-encryption.result
index e241d213c8e..22207f8184a 100644
--- a/mysql-test/suite/encryption/r/innodb-remove-encryption.result
+++ b/mysql-test/suite/encryption/r/innodb-remove-encryption.result
@@ -1,4 +1,6 @@
set global innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
call mtr.add_suppression("(mysqld|mariadbd).*: file-key-management-filename is not set");
call mtr.add_suppression("Plugin 'file_key_management' init function returned error.");
call mtr.add_suppression("Plugin 'file_key_management' registration as a ENCRYPTION failed.");
diff --git a/mysql-test/suite/encryption/r/innodb-spatial-index.result b/mysql-test/suite/encryption/r/innodb-spatial-index.result
index 66c3edcd109..6bd22d6c25f 100644
--- a/mysql-test/suite/encryption/r/innodb-spatial-index.result
+++ b/mysql-test/suite/encryption/r/innodb-spatial-index.result
@@ -38,6 +38,9 @@ INSERT INTO t2 values(1, 'secret', ST_GeomFromText('POINT(903994614 180726515)')
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION > 0;
NAME
innodb_system
+innodb_undo001
+innodb_undo002
+innodb_undo003
mysql/innodb_index_stats
mysql/innodb_table_stats
mysql/transaction_registry
diff --git a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result
index d6e32989c09..3a28082751b 100644
--- a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result
+++ b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result
@@ -41,7 +41,6 @@ NOT FOUND /mangled/ in t6.ibd
# t7 ... on expecting NOT FOUND
NOT FOUND /mysql/ in t7.ibd
# restart
-SET GLOBAL innodb_file_per_table = ON;
ALTER TABLE t1 ADD COLUMN b int default 2;
ALTER TABLE t2 ADD COLUMN b int default 2;
ALTER TABLE t7 ADD COLUMN b int default 2;
diff --git a/mysql-test/suite/encryption/r/tempfiles_encrypted.result b/mysql-test/suite/encryption/r/tempfiles_encrypted.result
index 46cfb3b58ec..5654946d786 100644
--- a/mysql-test/suite/encryption/r/tempfiles_encrypted.result
+++ b/mysql-test/suite/encryption/r/tempfiles_encrypted.result
@@ -1408,6 +1408,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1422,7 +1423,9 @@ EXPLAIN
"table": {
"table_name": "t0",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1443,6 +1446,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "t1.a",
"window_functions_computation": {
@@ -1459,7 +1463,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1480,6 +1486,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1494,7 +1501,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1517,6 +1526,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"having_condition": "MX in (3,5,7)",
"filesort": {
"sort_key": "t1.b",
@@ -1534,7 +1544,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 10,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1671,6 +1683,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1685,7 +1698,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1703,6 +1718,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1717,7 +1733,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1735,6 +1753,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1749,7 +1768,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1767,6 +1788,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -1781,7 +1803,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 3,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1827,6 +1851,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"filesort": {
"sort_key": "row_number() over ( order by t1.s1,t1.s2) desc",
"window_functions_computation": {
@@ -1843,7 +1868,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -1987,6 +2014,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"duplicate_removal": {
"window_functions_computation": {
"sorts": [
@@ -2002,7 +2030,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 9,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2167,6 +2197,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -2181,7 +2212,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 11,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -2234,6 +2267,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"window_functions_computation": {
"sorts": [
{
@@ -2248,7 +2282,9 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"rows": 6,
+ "cost": "COST_REPLACED",
"filtered": 100
}
}
@@ -3833,6 +3869,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"window_functions_computation": {
@@ -3855,9 +3892,11 @@ ANALYZE
"table": {
"table_name": "t1",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 3,
"r_rows": 3,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
diff --git a/mysql-test/suite/encryption/t/encrypt_and_grep.test b/mysql-test/suite/encryption/t/encrypt_and_grep.test
index 687f14e8a55..485a3eb2ec8 100644
--- a/mysql-test/suite/encryption/t/encrypt_and_grep.test
+++ b/mysql-test/suite/encryption/t/encrypt_and_grep.test
@@ -12,8 +12,6 @@
--let t3_IBD = $MYSQLD_DATADIR/test/t3.ibd
--let SEARCH_RANGE = 10000000
-SET GLOBAL innodb_file_per_table = ON;
-
create table t1 (a varchar(255)) engine=innodb encrypted=yes;
create table t2 (a varchar(255)) engine=innodb;
show warnings;
diff --git a/mysql-test/suite/encryption/t/innochecksum.test b/mysql-test/suite/encryption/t/innochecksum.test
index 516bc0733d9..358a6a0fcca 100644
--- a/mysql-test/suite/encryption/t/innochecksum.test
+++ b/mysql-test/suite/encryption/t/innochecksum.test
@@ -26,7 +26,6 @@ call mtr.add_suppression("InnoDB: Crash recovery is broken due to insufficient i
--enable_query_log
let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`;
-SET GLOBAL innodb_file_per_table = ON;
# zlib
set global innodb_compression_algorithm = 1;
diff --git a/mysql-test/suite/encryption/t/innodb-bad-key-change.test b/mysql-test/suite/encryption/t/innodb-bad-key-change.test
index 05a3b5f4d06..5eefded9ae1 100644
--- a/mysql-test/suite/encryption/t/innodb-bad-key-change.test
+++ b/mysql-test/suite/encryption/t/innodb-bad-key-change.test
@@ -23,8 +23,6 @@ call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space="
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
-- source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table = ON;
-
CREATE TABLE t1 (c VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=2;
INSERT INTO t1 VALUES ('foobar');
ALTER TABLE t1 ADD COLUMN c2 INT;
diff --git a/mysql-test/suite/encryption/t/innodb-bad-key-change2.test b/mysql-test/suite/encryption/t/innodb-bad-key-change2.test
index 21a9ddb217d..a6cc581e448 100644
--- a/mysql-test/suite/encryption/t/innodb-bad-key-change2.test
+++ b/mysql-test/suite/encryption/t/innodb-bad-key-change2.test
@@ -23,8 +23,6 @@ call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space="
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
--source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table = ON;
-
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
diff --git a/mysql-test/suite/encryption/t/innodb-bad-key-change4.test b/mysql-test/suite/encryption/t/innodb-bad-key-change4.test
index c37eb365e7c..b041c988851 100644
--- a/mysql-test/suite/encryption/t/innodb-bad-key-change4.test
+++ b/mysql-test/suite/encryption/t/innodb-bad-key-change4.test
@@ -19,8 +19,6 @@ call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space="
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
--source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table = ON;
-
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
diff --git a/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test b/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test
index 157fb25b185..2de7c171981 100644
--- a/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test
+++ b/mysql-test/suite/encryption/t/innodb-checksum-algorithm.test
@@ -2,12 +2,10 @@
-- source include/innodb_page_size.inc
-- source include/have_file_key_management_plugin.inc
-SET @saved_file_per_table = @@global.innodb_file_per_table;
SET @saved_encrypt_tables = @@global.innodb_encrypt_tables;
SET @saved_encryption_threads = @@global.innodb_encryption_threads;
SET @saved_encryption_key_id = @@global.innodb_default_encryption_key_id;
-SET GLOBAL innodb_file_per_table = ON;
SET GLOBAL innodb_encrypt_tables = ON;
SET GLOBAL innodb_encryption_threads = 4;
@@ -77,7 +75,6 @@ update tp set b=substr(b,1);
CHECK TABLE tce, tc, te, t, tpe, tp;
DROP TABLE tce, tc, te, t, tpe, tp;
-SET GLOBAL innodb_file_per_table = @saved_file_per_table;
SET GLOBAL innodb_encrypt_tables = @saved_encrypt_tables;
SET GLOBAL innodb_encryption_threads = @saved_encryption_threads;
SET GLOBAL innodb_default_encryption_key_id = @saved_encryption_key_id;
diff --git a/mysql-test/suite/encryption/t/innodb-compressed-blob.test b/mysql-test/suite/encryption/t/innodb-compressed-blob.test
index 12d061a852f..d3d53e2c41a 100644
--- a/mysql-test/suite/encryption/t/innodb-compressed-blob.test
+++ b/mysql-test/suite/encryption/t/innodb-compressed-blob.test
@@ -13,8 +13,6 @@ call mtr.add_suppression("InnoDB: Table `test`\\.`t[12]` is corrupted");
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
-- source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table = ON;
-
set GLOBAL innodb_default_encryption_key_id=4;
create table t1(a int not null primary key, b blob, index(b(10))) engine=innodb row_format=compressed;
create table t2(a int not null primary key, b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes;
diff --git a/mysql-test/suite/encryption/t/innodb-force-corrupt.test b/mysql-test/suite/encryption/t/innodb-force-corrupt.test
index 51771f1e14b..73fa0cde0cf 100644
--- a/mysql-test/suite/encryption/t/innodb-force-corrupt.test
+++ b/mysql-test/suite/encryption/t/innodb-force-corrupt.test
@@ -12,7 +12,6 @@ call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page nu
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption in an InnoDB type table");
call mtr.add_suppression("\\[ERROR\\] (mysqld|mariadbd).*: Index for table 't2' is corrupt; try to repair it");
-SET GLOBAL innodb_file_per_table = ON;
set global innodb_compression_algorithm = 1;
--echo # Create and populate tables to be corrupted
diff --git a/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test b/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test
index d360d5f8af7..1bd69365f68 100644
--- a/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test
+++ b/mysql-test/suite/encryption/t/innodb-key-rotation-disable.test
@@ -4,7 +4,6 @@
-- source include/not_embedded.inc
let $encryption = `SELECT @@innodb_encrypt_tables`;
-SET GLOBAL innodb_file_per_table = ON;
# zlib
set global innodb_compression_algorithm = 1;
diff --git a/mysql-test/suite/encryption/t/innodb-redo-badkey.opt b/mysql-test/suite/encryption/t/innodb-redo-badkey.opt
index 2de0bdb3241..60d43648e00 100644
--- a/mysql-test/suite/encryption/t/innodb-redo-badkey.opt
+++ b/mysql-test/suite/encryption/t/innodb-redo-badkey.opt
@@ -1,4 +1,3 @@
---innodb-change-buffering=all
--innodb-encrypt-tables=on
--innodb-tablespaces-encryption
--innodb-encryption-threads=2
diff --git a/mysql-test/suite/encryption/t/innodb-redo-badkey.test b/mysql-test/suite/encryption/t/innodb-redo-badkey.test
index 393ca4ad375..de6d7f2f253 100644
--- a/mysql-test/suite/encryption/t/innodb-redo-badkey.test
+++ b/mysql-test/suite/encryption/t/innodb-redo-badkey.test
@@ -31,8 +31,6 @@ call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot decrypt \\[page id: space="
--let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0
--source include/wait_condition.inc
-SET GLOBAL innodb_file_per_table = ON;
-
create table t1(a int not null primary key auto_increment, c char(250), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes encryption_key_id=4;
create table t2(a int not null primary key auto_increment, c char(250), b blob, index(b(10))) engine=innodb row_format=compressed;
create table t3(a int not null primary key auto_increment, c char(250), b blob, index(b(10))) engine=innodb encrypted=yes encryption_key_id=4;
diff --git a/mysql-test/suite/encryption/t/innodb-redo-nokeys.opt b/mysql-test/suite/encryption/t/innodb-redo-nokeys.opt
index 6190ad24ed3..4e7b22902a4 100644
--- a/mysql-test/suite/encryption/t/innodb-redo-nokeys.opt
+++ b/mysql-test/suite/encryption/t/innodb-redo-nokeys.opt
@@ -1,2 +1 @@
---innodb-change-buffering=none
--innodb-default-encryption-key-id=20
diff --git a/mysql-test/suite/encryption/t/innodb-redo-nokeys.test b/mysql-test/suite/encryption/t/innodb-redo-nokeys.test
index 713b98130be..1eca1d8cf73 100644
--- a/mysql-test/suite/encryption/t/innodb-redo-nokeys.test
+++ b/mysql-test/suite/encryption/t/innodb-redo-nokeys.test
@@ -20,8 +20,6 @@ call mtr.add_suppression("InnoDB: Set innodb_force_recovery=1 to ignore corrupti
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
-- source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table = ON;
-
create table t1(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes encryption_key_id=20;
create table t2(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes;
create table t3(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb encrypted=yes encryption_key_id=20;
diff --git a/mysql-test/suite/encryption/t/innodb_onlinealter_encryption.test b/mysql-test/suite/encryption/t/innodb_onlinealter_encryption.test
index 9f61bf118aa..dc6d1e6f93c 100644
--- a/mysql-test/suite/encryption/t/innodb_onlinealter_encryption.test
+++ b/mysql-test/suite/encryption/t/innodb_onlinealter_encryption.test
@@ -75,8 +75,6 @@ set autocommit=1;
-- source include/start_mysqld.inc
-SET GLOBAL innodb_file_per_table = ON;
-
ALTER TABLE t1 ADD COLUMN b int default 2;
ALTER TABLE t2 ADD COLUMN b int default 2;
ALTER TABLE t7 ADD COLUMN b int default 2;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_drop_db.result b/mysql-test/suite/engines/funcs/r/rpl_drop_db.result
index 3712527afe4..1b132c20afc 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_drop_db.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_drop_db.result
@@ -6,8 +6,6 @@ create database mysqltest1;
create table mysqltest1.t1 (n int);
insert into mysqltest1.t1 values (1);
select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create table mysqltest1.t2 (n int);
create table mysqltest1.t3 (n int);
drop database mysqltest1;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result b/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result
index 302cf2351c2..6c20623d62b 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_misc_functions.result
@@ -42,8 +42,6 @@ INSERT INTO t1 (col_a) VALUES (test_replication_sf());
INSERT INTO t1 (col_a) VALUES (test_replication_sf());
connection slave;
select * from t1 into outfile "../../tmp/t1_slave.txt";
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connection master;
create temporary table t1_slave select * from t1 where 1=0;
load data infile '../../tmp/t1_slave.txt' into table t1_slave;
diff --git a/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff b/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff
index da41283e42f..cbf297f8071 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff
+++ b/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.rdiff
@@ -1,5 +1,5 @@
---- /home/alice/git/10.3/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.result~ 2021-03-19 17:27:12.935559866 +0100
-+++ /home/alice/git/10.3/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.reject 2021-03-19 17:27:14.071534938 +0100
+--- /home/alice/git/10.3/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.result~
++++ /home/alice/git/10.3/mysql-test/suite/engines/funcs/r/rpl_sp,myisam,mix.reject
@@ -126,12 +126,15 @@
show warnings;
Level Code Message
diff --git a/mysql-test/suite/engines/funcs/r/rpl_temporary.result b/mysql-test/suite/engines/funcs/r/rpl_temporary.result
index 492e9ac3ac3..3651ead16cc 100644
--- a/mysql-test/suite/engines/funcs/r/rpl_temporary.result
+++ b/mysql-test/suite/engines/funcs/r/rpl_temporary.result
@@ -42,12 +42,12 @@ connect con3,localhost,zedjzlcsjhd,,;
connection con3;
SET @save_select_limit=@@session.sql_select_limit;
SET @@session.sql_select_limit=10, @@session.pseudo_thread_id=100;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
SELECT @@session.sql_select_limit = @save_select_limit;
@@session.sql_select_limit = @save_select_limit
1
SET @@session.sql_select_limit=10, @@session.sql_log_bin=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SELECT @@session.sql_select_limit = @save_select_limit;
@@session.sql_select_limit = @save_select_limit
1
diff --git a/mysql-test/suite/engines/iuds/r/type_bit_iuds.result b/mysql-test/suite/engines/iuds/r/type_bit_iuds.result
index 93ed7a9f162..6f48a430292 100644
--- a/mysql-test/suite/engines/iuds/r/type_bit_iuds.result
+++ b/mysql-test/suite/engines/iuds/r/type_bit_iuds.result
@@ -568,9 +568,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -1399,9 +1408,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -2252,10 +2270,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -3191,7 +3216,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -11797,9 +11822,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -12628,9 +12662,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -13481,10 +13524,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -14420,7 +14470,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -23032,9 +23082,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -23869,9 +23928,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -24722,10 +24790,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -25661,7 +25736,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -34273,9 +34348,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -35110,9 +35194,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -35969,10 +36062,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -36908,7 +37008,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -45520,9 +45620,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -46357,9 +46466,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -47216,10 +47334,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -48161,7 +48286,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -56781,9 +56906,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -57620,9 +57754,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -58481,10 +58624,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -59428,7 +59578,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -68059,9 +68209,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -68895,9 +69054,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -69753,10 +69921,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -70697,7 +70872,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
@@ -79322,9 +79497,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -80158,9 +80342,18 @@ UPDATE IGNORE t5 SET c2=c2+10 WHERE c1 IN (b'001',b'101',b'111');
SELECT hex(c1),hex(c2) FROM t5;
hex(c1) hex(c2)
1 1
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -81016,10 +81209,17 @@ hex(c1) hex(c2)
1 3
2 2
3 3
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
-Warning 1264 Out of range value for column 'c2' at row 2
-Warning 1264 Out of range value for column 'c2' at row 3
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
+Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
TRUNCATE t6;
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135);
@@ -81960,7 +82160,7 @@ hex(c1) hex(c2)
8 D
9 D
A A
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
Warnings:
Warning 1264 Out of range value for column 'c2' at row 1
TRUNCATE t5;
diff --git a/mysql-test/suite/engines/iuds/t/type_bit_iuds.test b/mysql-test/suite/engines/iuds/t/type_bit_iuds.test
index 8f48d50fad0..fac2505fd22 100644
--- a/mysql-test/suite/engines/iuds/t/type_bit_iuds.test
+++ b/mysql-test/suite/engines/iuds/t/type_bit_iuds.test
@@ -227,7 +227,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -513,7 +513,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -799,7 +799,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -1085,7 +1085,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -2515,7 +2515,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -2801,7 +2801,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -3087,7 +3087,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -3373,7 +3373,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -4803,7 +4803,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -5089,7 +5089,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -5375,7 +5375,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -5661,7 +5661,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -7091,7 +7091,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -7377,7 +7377,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -7663,7 +7663,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -7949,7 +7949,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -9379,7 +9379,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -9665,7 +9665,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -9951,7 +9951,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -10237,7 +10237,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -11667,7 +11667,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -11953,7 +11953,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -12239,7 +12239,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -12525,7 +12525,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -13956,7 +13956,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -14243,7 +14243,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -14530,7 +14530,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -14817,7 +14817,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -16252,7 +16252,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -16539,7 +16539,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -16826,7 +16826,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
@@ -17113,7 +17113,7 @@ SELECT hex(c1),hex(c2) FROM t5;
# Update using eq_ref
# EXPLAIN SELECT * FROM t5,t6 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
-UPDATE IGNORE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
+UPDATE IGNORE t6 straight_join t5 force index(PRIMARY) SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2;
############# DELETE ###########
diff --git a/mysql-test/suite/federated/federated_server.result b/mysql-test/suite/federated/federated_server.result
index d25676e7ad5..1207a4bbe09 100644
--- a/mysql-test/suite/federated/federated_server.result
+++ b/mysql-test/suite/federated/federated_server.result
@@ -197,7 +197,7 @@ OWNER 'root');
create user guest_select@localhost;
grant select on federated.* to guest_select@localhost;
create user guest_super@localhost;
-grant select,SUPER,RELOAD on *.* to guest_super@localhost;
+grant select,FEDERATED ADMIN,RELOAD on *.* to guest_super@localhost;
create user guest_usage@localhost;
grant usage on *.* to guest_usage@localhost;
CREATE TABLE federated.t1 (
@@ -212,7 +212,7 @@ connect conn_usage,127.0.0.1,guest_usage,,,$MASTER_MYPORT;
connect conn_super,127.0.0.1,guest_super,,,$MASTER_MYPORT;
connection conn_select;
alter server s1 options (database 'db_bogus');
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
connection master;
flush tables;
select * from federated.t1;
@@ -220,7 +220,7 @@ id name
1 this is legitimate
connection conn_usage;
alter server s1 options (database 'db_bogus');
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
connection master;
flush tables;
select * from federated.t1;
@@ -234,7 +234,7 @@ select * from federated.t1;
Got one of the listed errors
connection conn_select;
drop server if exists 's1';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
create server 's1' foreign data wrapper 'mysql' options
(HOST '127.0.0.1',
DATABASE 'db_legitimate',
@@ -243,7 +243,7 @@ PASSWORD 'foo',
PORT SLAVE_PORT,
SOCKET '',
OWNER 'root');
-ERROR 42000: Access denied; you need (at least one of) the SUPER, FEDERATED ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the FEDERATED ADMIN privilege(s) for this operation
connection conn_super;
drop server 's1';
create server 's1' foreign data wrapper 'mysql' options
diff --git a/mysql-test/suite/federated/federated_server.test b/mysql-test/suite/federated/federated_server.test
index 3d491b1dfdf..10f383d4efa 100644
--- a/mysql-test/suite/federated/federated_server.test
+++ b/mysql-test/suite/federated/federated_server.test
@@ -162,7 +162,7 @@ drop database second_db;
#
# Bug#25671 - CREATE/DROP/ALTER SERVER should require privileges
#
-# Changes to SERVER declarations should require SUPER privilege.
+# Changes to SERVER declarations should require FEDERATED ADMIN privilege.
# Based upon test case by Giuseppe Maxia
create database db_legitimate;
@@ -202,7 +202,7 @@ create user guest_select@localhost;
grant select on federated.* to guest_select@localhost;
create user guest_super@localhost;
-grant select,SUPER,RELOAD on *.* to guest_super@localhost;
+grant select,FEDERATED ADMIN,RELOAD on *.* to guest_super@localhost;
create user guest_usage@localhost;
grant usage on *.* to guest_usage@localhost;
diff --git a/mysql-test/suite/federated/federatedx.result b/mysql-test/suite/federated/federatedx.result
index 49deff81c4c..bb817b210f2 100644
--- a/mysql-test/suite/federated/federatedx.result
+++ b/mysql-test/suite/federated/federatedx.result
@@ -2357,6 +2357,22 @@ DROP TABLE t2_fed, t1, t2;
set @@optimizer_switch=@save_optimizer_switch;
DROP SERVER s;
# End of 10.5 tests
+#
+# MDEV-30569: Assertion ...ha_table_flags() failed in Duplicate_weedout_picker::check_qep
+#
+create server s foreign data wrapper mysql options
+(host "127.0.0.1", database "test", user "root", port $MASTER_MYPORT);
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (3),(4);
+CREATE TABLE t1_fed ENGINE=FEDERATED CONNECTION='s/t1';
+CREATE VIEW v AS SELECT * FROM t1_fed;
+SELECT * FROM v WHERE a IN ( SELECT b FROM t2);
+a
+DROP VIEW v;
+DROP TABLE t1_fed, t1, t2;
+DROP SERVER s;
connection master;
DROP TABLE IF EXISTS federated.t1;
DROP DATABASE IF EXISTS federated;
diff --git a/mysql-test/suite/federated/federatedx.test b/mysql-test/suite/federated/federatedx.test
index 7e5a335b786..579316beed8 100644
--- a/mysql-test/suite/federated/federatedx.test
+++ b/mysql-test/suite/federated/federatedx.test
@@ -2090,4 +2090,26 @@ DROP SERVER s;
--echo # End of 10.5 tests
+--echo #
+--echo # MDEV-30569: Assertion ...ha_table_flags() failed in Duplicate_weedout_picker::check_qep
+--echo #
+
+evalp create server s foreign data wrapper mysql options
+ (host "127.0.0.1", database "test", user "root", port $MASTER_MYPORT);
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (3),(4);
+
+CREATE TABLE t1_fed ENGINE=FEDERATED CONNECTION='s/t1';
+CREATE VIEW v AS SELECT * FROM t1_fed;
+
+SELECT * FROM v WHERE a IN ( SELECT b FROM t2);
+
+DROP VIEW v;
+DROP TABLE t1_fed, t1, t2;
+DROP SERVER s;
+
source include/federated_cleanup.inc;
diff --git a/mysql-test/suite/federated/federatedx_create_handlers.result b/mysql-test/suite/federated/federatedx_create_handlers.result
index f2e2247bae1..2f413f720de 100644
--- a/mysql-test/suite/federated/federatedx_create_handlers.result
+++ b/mysql-test/suite/federated/federatedx_create_handlers.result
@@ -150,7 +150,7 @@ FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
WHERE federated.t3.name=t.name;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 7
-1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 1
2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
EXPLAIN FORMAT=JSON
SELECT *
@@ -160,12 +160,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"rows": 7,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -178,7 +181,9 @@ EXPLAIN
"key_length": "18",
"used_key_parts": ["name"],
"ref": ["federated.t3.name"],
- "rows": 2,
+ "loops": 7,
+ "rows": 1,
+ "cost": "COST_REPLACED",
"filtered": 100,
"materialized": {
"query_block": {
@@ -199,7 +204,7 @@ FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
WHERE federated.t3.name=t.name;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 7 7.00 100.00 100.00
-1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2 0.00 100.00 100.00
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 1 0.00 100.00 100.00
2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
SELECT *
FROM federated.t3, (SELECT t1.name FROM federated.t1
@@ -216,7 +221,7 @@ FROM federated.t2 GROUP BY name)) t
WHERE federated.t3.name=t.name;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 7
-1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 1
2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
ANALYZE FORMAT=JSON
SELECT *
@@ -231,6 +236,7 @@ ANALYZE
},
"query_block": {
"select_id": 1,
+ "cost": "REPLACED",
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"nested_loop": [
@@ -238,9 +244,11 @@ ANALYZE
"table": {
"table_name": "t3",
"access_type": "ALL",
+ "loops": 1,
"r_loops": 1,
"rows": 7,
"r_rows": 7,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -256,9 +264,11 @@ ANALYZE
"key_length": "18",
"used_key_parts": ["name"],
"ref": ["federated.t3.name"],
+ "loops": 7,
"r_loops": 7,
- "rows": 2,
+ "rows": 1,
"r_rows": 0,
+ "cost": "REPLACED",
"r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED",
"filtered": 100,
@@ -298,7 +308,7 @@ SELECT * FROM federated.t1 WHERE id >= 5) t
WHERE federated.t3.name=t.name;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 7
-1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2
+1 PRIMARY <derived2> ref key1,distinct_key key1 18 federated.t3.name 1
2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
#
# MDEV-21887: federatedx crashes on SELECT ... INTO query in select_handler code
@@ -364,12 +374,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "t5",
"access_type": "ALL",
+ "loops": 1,
"rows": 2,
+ "cost": "COST_REPLACED",
"filtered": 100
}
},
@@ -378,7 +391,9 @@ EXPLAIN
"table": {
"table_name": "<derived2>",
"access_type": "ALL",
+ "loops": 2,
"rows": 5,
+ "cost": "COST_REPLACED",
"filtered": 100
},
"buffer_type": "flat",
diff --git a/mysql-test/suite/federated/federatedx_create_handlers.test b/mysql-test/suite/federated/federatedx_create_handlers.test
index f827c141f3d..566aee6a0d6 100644
--- a/mysql-test/suite/federated/federatedx_create_handlers.test
+++ b/mysql-test/suite/federated/federatedx_create_handlers.test
@@ -76,6 +76,7 @@ SELECT id FROM federated.t1 WHERE id < 5;
EXPLAIN EXTENDED
SELECT id FROM federated.t1 WHERE id < 5;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT id FROM federated.t1 WHERE id < 5;
@@ -103,6 +104,7 @@ SELECT *
FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
WHERE federated.t3.name=t.name;
+--source include/explain-no-costs.inc
EXPLAIN FORMAT=JSON
SELECT *
FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
@@ -177,6 +179,7 @@ explain
select * from federated.t1
where name in (select name from federated.t2);
+--source include/explain-no-costs.inc
explain format=json
select * from federated.t1
where name in (select name from federated.t2);
@@ -196,6 +199,7 @@ select * from t5,
where name in (select name from federated.t2) or name like 'foo%') as TQ;
--echo # Must not show elements with select_id=3
+--source include/explain-no-costs.inc
explain format=json
select * from t5,
(select id from federated.t1
diff --git a/mysql-test/suite/funcs_1/r/innodb_trig_03e.result b/mysql-test/suite/funcs_1/r/innodb_trig_03e.result
index 51f4eca4f1a..c9e9b1c5ae7 100644
--- a/mysql-test/suite/funcs_1/r/innodb_trig_03e.result
+++ b/mysql-test/suite/funcs_1/r/innodb_trig_03e.result
@@ -1353,7 +1353,7 @@ drop trigger trg1_0;
create definer=not_ex_user@localhost trigger trg1_0
before INSERT on t1 for each row
set new.f1 = 'trig 1_0-yes';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
create definer=current_user trigger trg1_1
before INSERT on t1 for each row
set new.f1 = 'trig 1_1-yes';
@@ -1388,7 +1388,7 @@ GRANT SELECT, INSERT, UPDATE, TRIGGER ON `priv_db`.`t1` TO `test_yesprivs`@`loca
create definer=not_ex_user@localhost trigger trg1_3
after UPDATE on t1 for each row
set @var1 = 'trig 1_3-yes';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection default;
select current_user;
current_user
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is.result b/mysql-test/suite/funcs_1/r/is_columns_is.result
index c88a3a9ac8d..be0d6aa7740 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result
@@ -210,6 +210,19 @@ def information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA 10 NULL YES varc
def information_schema KEY_COLUMN_USAGE TABLE_CATALOG 4 NULL NO varchar 512 1536 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(512) select NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_NAME 6 NULL NO varchar 64 192 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(64) select NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_SCHEMA 5 NULL NO varchar 64 192 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(64) select NEVER NULL
+def information_schema OPTIMIZER_COSTS ENGINE 1 NULL NO varchar 192 576 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(192) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_COST 2 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_RATIO 8 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_INDEX_BLOCK_COPY_COST 3 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COMPARE_COST 4 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COPY_COST 5 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_LOOKUP_COST 6 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_NEXT_FIND_COST 7 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COMPARE_COST 12 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COPY_COST 13 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_COPY_COST 9 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_LOOKUP_COST 10 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_NEXT_FIND_COST 11 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) select NEVER NULL
def information_schema OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES 4 NULL NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) select NEVER NULL
def information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(20) select NEVER NULL
def information_schema OPTIMIZER_TRACE QUERY 1 NULL NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb3 utf8mb3_general_ci longtext select NEVER NULL
@@ -761,6 +774,19 @@ NULL information_schema KEY_COLUMN_USAGE POSITION_IN_UNIQUE_CONSTRAINT bigint NU
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA varchar 64 192 utf8mb3 utf8mb3_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_NAME varchar 64 192 utf8mb3 utf8mb3_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_COLUMN_NAME varchar 64 192 utf8mb3 utf8mb3_general_ci varchar(64)
+3.0000 information_schema OPTIMIZER_COSTS ENGINE varchar 192 576 utf8mb3 utf8mb3_general_ci varchar(192)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_INDEX_BLOCK_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COMPARE_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_LOOKUP_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_NEXT_FIND_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_RATIO decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_LOOKUP_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_NEXT_FIND_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COMPARE_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
1.0000 information_schema OPTIMIZER_TRACE QUERY longtext 4294967295 4294967295 utf8mb3 utf8mb3_general_ci longtext
1.0000 information_schema OPTIMIZER_TRACE TRACE longtext 4294967295 4294967295 utf8mb3 utf8mb3_general_ci longtext
NULL information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE int NULL NULL NULL NULL int(20)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
index bb12a0c38df..f9906d3f177 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
@@ -210,6 +210,19 @@ def information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA 10 NULL YES varc
def information_schema KEY_COLUMN_USAGE TABLE_CATALOG 4 NULL NO varchar 512 1536 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(512) NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_NAME 6 NULL NO varchar 64 192 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(64) NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_SCHEMA 5 NULL NO varchar 64 192 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(64) NEVER NULL
+def information_schema OPTIMIZER_COSTS ENGINE 1 NULL NO varchar 192 576 NULL NULL NULL utf8mb3 utf8mb3_general_ci varchar(192) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_COST 2 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_RATIO 8 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_INDEX_BLOCK_COPY_COST 3 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COMPARE_COST 4 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COPY_COST 5 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_LOOKUP_COST 6 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_NEXT_FIND_COST 7 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COMPARE_COST 12 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COPY_COST 13 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_COPY_COST 9 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_LOOKUP_COST 10 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
+def information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_NEXT_FIND_COST 11 NULL NO decimal NULL NULL 9 6 NULL NULL NULL decimal(9,6) NEVER NULL
def information_schema OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES 4 NULL NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) NEVER NULL
def information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(20) NEVER NULL
def information_schema OPTIMIZER_TRACE QUERY 1 NULL NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb3 utf8mb3_general_ci longtext NEVER NULL
@@ -761,6 +774,19 @@ NULL information_schema KEY_COLUMN_USAGE POSITION_IN_UNIQUE_CONSTRAINT bigint NU
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA varchar 64 192 utf8mb3 utf8mb3_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_NAME varchar 64 192 utf8mb3 utf8mb3_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_COLUMN_NAME varchar 64 192 utf8mb3 utf8mb3_general_ci varchar(64)
+3.0000 information_schema OPTIMIZER_COSTS ENGINE varchar 192 576 utf8mb3 utf8mb3_general_ci varchar(192)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_INDEX_BLOCK_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COMPARE_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_LOOKUP_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_KEY_NEXT_FIND_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_DISK_READ_RATIO decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_LOOKUP_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROW_NEXT_FIND_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COMPARE_COST decimal NULL NULL NULL NULL decimal(9,6)
+NULL information_schema OPTIMIZER_COSTS OPTIMIZER_ROWID_COPY_COST decimal NULL NULL NULL NULL decimal(9,6)
1.0000 information_schema OPTIMIZER_TRACE QUERY longtext 4294967295 4294967295 utf8mb3 utf8mb3_general_ci longtext
1.0000 information_schema OPTIMIZER_TRACE TRACE longtext 4294967295 4294967295 utf8mb3 utf8mb3_general_ci longtext
NULL information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE int NULL NULL NULL NULL int(20)
diff --git a/mysql-test/suite/funcs_1/r/is_tables_is.result b/mysql-test/suite/funcs_1/r/is_tables_is.result
index c18f733c86f..93ede4d08cc 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result
@@ -514,6 +514,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_COSTS
+TABLE_TYPE SYSTEM VIEW
+ENGINE MEMORY
+VERSION 11
+ROW_FORMAT Fixed
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8mb3_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME OPTIMIZER_TRACE
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
@@ -1630,6 +1655,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_COSTS
+TABLE_TYPE SYSTEM VIEW
+ENGINE MEMORY
+VERSION 11
+ROW_FORMAT Fixed
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8mb3_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME OPTIMIZER_TRACE
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
diff --git a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
index c18f733c86f..93ede4d08cc 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
@@ -514,6 +514,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_COSTS
+TABLE_TYPE SYSTEM VIEW
+ENGINE MEMORY
+VERSION 11
+ROW_FORMAT Fixed
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8mb3_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME OPTIMIZER_TRACE
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
@@ -1630,6 +1655,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_COSTS
+TABLE_TYPE SYSTEM VIEW
+ENGINE MEMORY
+VERSION 11
+ROW_FORMAT Fixed
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8mb3_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME OPTIMIZER_TRACE
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
diff --git a/mysql-test/suite/funcs_1/r/memory_trig_03e.result b/mysql-test/suite/funcs_1/r/memory_trig_03e.result
index 9397e0766ef..a4a429755b5 100644
--- a/mysql-test/suite/funcs_1/r/memory_trig_03e.result
+++ b/mysql-test/suite/funcs_1/r/memory_trig_03e.result
@@ -1354,7 +1354,7 @@ drop trigger trg1_0;
create definer=not_ex_user@localhost trigger trg1_0
before INSERT on t1 for each row
set new.f1 = 'trig 1_0-yes';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
create definer=current_user trigger trg1_1
before INSERT on t1 for each row
set new.f1 = 'trig 1_1-yes';
@@ -1389,7 +1389,7 @@ GRANT SELECT, INSERT, UPDATE, TRIGGER ON `priv_db`.`t1` TO `test_yesprivs`@`loca
create definer=not_ex_user@localhost trigger trg1_3
after UPDATE on t1 for each row
set @var1 = 'trig 1_3-yes';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection default;
select current_user;
current_user
diff --git a/mysql-test/suite/funcs_1/r/myisam_trig_03e.result b/mysql-test/suite/funcs_1/r/myisam_trig_03e.result
index b16beda5752..eb70366b789 100644
--- a/mysql-test/suite/funcs_1/r/myisam_trig_03e.result
+++ b/mysql-test/suite/funcs_1/r/myisam_trig_03e.result
@@ -1354,7 +1354,7 @@ drop trigger trg1_0;
create definer=not_ex_user@localhost trigger trg1_0
before INSERT on t1 for each row
set new.f1 = 'trig 1_0-yes';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
create definer=current_user trigger trg1_1
before INSERT on t1 for each row
set new.f1 = 'trig 1_1-yes';
@@ -1389,7 +1389,7 @@ GRANT SELECT, INSERT, UPDATE, TRIGGER ON `priv_db`.`t1` TO `test_yesprivs`@`loca
create definer=not_ex_user@localhost trigger trg1_3
after UPDATE on t1 for each row
set @var1 = 'trig 1_3-yes';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
connection default;
select current_user;
current_user
diff --git a/mysql-test/suite/galera/r/galera_event_node_evict.result b/mysql-test/suite/galera/r/galera_event_node_evict.result
new file mode 100644
index 00000000000..62911facd05
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_event_node_evict.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+# Correct Galera library found
+connection node_1;
+CALL mtr.add_suppression("\\[Warning\\] WSREP: evicting member .* at .* permanently from group");
+connection node_2;
+CALL mtr.add_suppression("\\[Warning\\] WSREP: handshake with .* .* failed: 'evicted'");
+CALL mtr.add_suppression("\\[ERROR\\] WSREP: exception from gcomm, backend must be restarted: this node has been evicted out of the cluster, gcomm backend restart is required \\(FATAL\\)");
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+CALL mtr.add_suppression("\\[Warning\\] WSREP: evicting member .* at .* permanently from group");
+connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4;
+CALL mtr.add_suppression("\\[Warning\\] WSREP: evicting member .* at .* permanently from group");
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_4;
+connection node_2;
+SET SESSION wsrep_on = ON;
+SET SESSION wsrep_sync_wait = 15;
+SET GLOBAL wsrep_on = OFF;
+include/assert_grep.inc [Node evicted]
diff --git a/mysql-test/suite/galera/r/galera_ist_MDEV-28423,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_MDEV-28423,debug.rdiff
index 4eda9d7d045..32c3949b1c4 100644
--- a/mysql-test/suite/galera/r/galera_ist_MDEV-28423,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_MDEV-28423,debug.rdiff
@@ -1,5 +1,5 @@
---- suite/galera/r/galera_ist_MDEV-28423.result 2022-06-13 09:40:33.073863796 +0300
-+++ suite/galera/r/galera_ist_MDEV-28423.reject 2022-06-13 09:58:59.936874991 +0300
+--- suite/galera/r/galera_ist_MDEV-28423.result
++++ suite/galera/r/galera_ist_MDEV-28423.reject
@@ -517,3 +517,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_ist_MDEV-28583,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_MDEV-28583,debug.rdiff
index 1c33916330a..c1bc37fb202 100644
--- a/mysql-test/suite/galera/r/galera_ist_MDEV-28583,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_MDEV-28583,debug.rdiff
@@ -1,5 +1,5 @@
---- suite/galera/r/galera_ist_MDEV-28583.result 2022-06-11 10:48:16.875034382 +0300
-+++ suite/galera/r/galera_ist_MDEV-28583,debug.reject 2022-06-11 11:25:55.616481509 +0300
+--- suite/galera/r/galera_ist_MDEV-28583.result
++++ suite/galera/r/galera_ist_MDEV-28583,debug.reject
@@ -517,3 +517,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
index adf12c23e4a..243b2a50b2a 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_ist_mariabackup.result 2021-04-10 14:21:16.141724901 +0300
-+++ r/galera_ist_mariabackup,debug.reject 2021-04-10 14:49:04.455785652 +0300
+--- r/galera_ist_mariabackup.result
++++ r/galera_ist_mariabackup,debug.reject
@@ -517,3 +517,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff
index c9457d70812..b7a91b010fe 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_ist_mariabackup_innodb_flush_logs.result 2021-04-10 14:21:52.661886653 +0300
-+++ r/galera_ist_mariabackup_innodb_flush_logs,debug.reject 2021-04-10 14:49:56.740062774 +0300
+--- r/galera_ist_mariabackup_innodb_flush_logs.result
++++ r/galera_ist_mariabackup_innodb_flush_logs,debug.reject
@@ -172,3 +172,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff
index e76b37838fb..f3df998be95 100644
--- a/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_rsync,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_ist_rsync.result 2021-04-10 14:24:05.942467091 +0300
-+++ r/galera_ist_rsync,debug.reject 2021-04-10 14:52:14.236776538 +0300
+--- r/galera_ist_rsync.result
++++ r/galera_ist_rsync,debug.reject
@@ -517,3 +517,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_many_indexes.result b/mysql-test/suite/galera/r/galera_many_indexes.result
index 963d3552252..c0eabffd06d 100644
--- a/mysql-test/suite/galera/r/galera_many_indexes.result
+++ b/mysql-test/suite/galera/r/galera_many_indexes.result
@@ -74,7 +74,7 @@ LENGTH(f1) = 767
1
EXPLAIN SELECT COUNT(*) = 1 FROM t1 FORCE KEY (PRIMARY) WHERE f1 = REPEAT('a', 767);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 const PRIMARY PRIMARY 769 const 1 Using index
+1 SIMPLE t1 const PRIMARY PRIMARY 769 const 1
SELECT COUNT(*) = 1 FROM t1 FORCE KEY (PRIMARY) WHERE f1 = REPEAT('a', 767);
COUNT(*) = 1
1
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff
index 870b12de3c9..875d53addd8 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_sst_mariabackup_data_dir.result 2021-04-10 14:26:02.798965488 +0300
-+++ r/galera_sst_mariabackup_data_dir,debug.reject 2021-04-10 14:54:44.825538224 +0300
+--- r/galera_sst_mariabackup_data_dir.result
++++ r/galera_sst_mariabackup_data_dir,debug.reject
@@ -516,5 +516,189 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_force_recovery,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mariabackup_force_recovery,debug.rdiff
index bad8355b514..da294317c27 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_force_recovery,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_force_recovery,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_sst_mariabackup.result 2021-04-10 14:25:04.142716409 +0300
-+++ r/galera_sst_mariabackup,debug.reject 2021-04-10 14:53:30.033162191 +0300
+--- r/galera_sst_mariabackup.result
++++ r/galera_sst_mariabackup,debug.reject
@@ -516,5 +516,189 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
index 14f67770572..ae242e2b216 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_sst_rsync2.result 2021-04-10 14:34:48.646288119 +0300
-+++ r/galera_sst_rsync2,debug.reject 2021-04-10 15:04:10.276286996 +0300
+--- r/galera_sst_rsync2.result
++++ r/galera_sst_rsync2,debug.reject
@@ -516,3 +516,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff
index 00b42d53b51..558a8e7cd07 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir,debug.rdiff
@@ -1,5 +1,5 @@
---- r/galera_sst_rsync_data_dir.result 2021-04-10 14:35:28.090610315 +0300
-+++ r/galera_sst_rsync_data_dir,debug.reject 2021-04-10 15:50:26.945234998 +0300
+--- r/galera_sst_rsync_data_dir.result
++++ r/galera_sst_rsync_data_dir,debug.reject
@@ -516,3 +516,187 @@
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_event_node_evict.cnf b/mysql-test/suite/galera/t/galera_event_node_evict.cnf
new file mode 100644
index 00000000000..e9b669babca
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_event_node_evict.cnf
@@ -0,0 +1,14 @@
+!include ../galera_4nodes.cnf
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;evs.auto_evict=1'
+
+[mysqld.2]
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;evs.auto_evict=1'
+wsrep_status_file='status2.json'
+
+[mysqld.3]
+wsrep_provider_options='base_port=@mysqld.3.#galera_port;evs.auto_evict=1'
+
+[mysqld.4]
+wsrep_provider_options='base_port=@mysqld.4.#galera_port;evs.auto_evict=1'
diff --git a/mysql-test/suite/galera/t/galera_event_node_evict.test b/mysql-test/suite/galera/t/galera_event_node_evict.test
new file mode 100644
index 00000000000..9651bf09d48
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_event_node_evict.test
@@ -0,0 +1,96 @@
+#
+# Test that node eviction event is generated in the status file.
+#
+
+--source include/big_test.inc
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+# Make sure that Galera library has node eviction event reporting.
+--let $galera_version=26.4.14
+--source suite/wsrep/include/check_galera_version.inc
+
+--connection node_1
+CALL mtr.add_suppression("\\[Warning\\] WSREP: evicting member .* at .* permanently from group");
+
+--connection node_2
+CALL mtr.add_suppression("\\[Warning\\] WSREP: handshake with .* .* failed: 'evicted'");
+CALL mtr.add_suppression("\\[ERROR\\] WSREP: exception from gcomm, backend must be restarted: this node has been evicted out of the cluster, gcomm backend restart is required \\(FATAL\\)");
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+CALL mtr.add_suppression("\\[Warning\\] WSREP: evicting member .* at .* permanently from group");
+
+--connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4
+CALL mtr.add_suppression("\\[Warning\\] WSREP: evicting member .* at .* permanently from group");
+
+# Save original auto_increment_offset values.
+
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--let $node_4=node_4
+--source include/auto_increment_offset_save.inc
+
+# Repeatedly isolate the node from the cluster until it's evicted.
+
+--disable_query_log
+--disable_result_log
+
+--let $evicted = 0
+while (!$evicted)
+{
+ --connection node_2
+
+ --let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+ --source include/wait_condition.inc
+
+ --let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
+ --source include/wait_condition.inc
+
+ --disable_query_log
+ --disable_result_log
+
+ SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+
+ --connection node_1
+ --let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+ --source include/wait_condition.inc
+
+ --disable_query_log
+ --disable_result_log
+
+ --connection node_2
+ SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+
+ # As the cluster size doesn't change in case the node is evicted, there is
+ # no good condition to wait for in case the eviction happens, so sleep a bit.
+
+ --sleep 1
+
+ --connection node_1
+ --let $evicted = `SELECT VARIABLE_VALUE != '' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_evs_evict_list'`
+}
+
+--enable_query_log
+--enable_result_log
+
+--connection node_2
+--source include/wsrep_wait_disconnect.inc
+SET GLOBAL wsrep_on = OFF;
+
+# Check the status file to contain the eviction event message.
+
+--let $assert_count = 1
+--let $assert_file = $MYSQLTEST_VARDIR/mysqld.2/data/status2.json
+--let $assert_text = Node evicted
+--let $assert_select = "status": "evicted"
+--source include/assert_grep.inc
+
+# Re-join the node after restart.
+
+--source include/restart_mysqld.inc
+
+--let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+--source include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera/t/galera_ist_MDEV-28423.cnf b/mysql-test/suite/galera/t/galera_ist_MDEV-28423.cnf
index 691e52208b1..3d43138f3c7 100644
--- a/mysql-test/suite/galera/t/galera_ist_MDEV-28423.cnf
+++ b/mysql-test/suite/galera/t/galera_ist_MDEV-28423.cnf
@@ -3,7 +3,6 @@
[mysqld.1]
# server-id=101
#wsrep-debug=1
-innodb_file_per_table
innodb_autoinc_lock_mode=2
#wsrep_sst_method=rsync
wsrep_sst_method=mariabackup
@@ -20,7 +19,6 @@ log_bin=binlog
[mysqld.2]
# server-id=102
#wsrep-debug=1
-innodb_file_per_table
innodb_autoinc_lock_mode=2
#wsrep_sst_method=rsync
wsrep_sst_method=mariabackup
diff --git a/mysql-test/suite/galera/t/galera_ist_MDEV-28583.cnf b/mysql-test/suite/galera/t/galera_ist_MDEV-28583.cnf
index 3835cd02a41..6fc6e3421cb 100644
--- a/mysql-test/suite/galera/t/galera_ist_MDEV-28583.cnf
+++ b/mysql-test/suite/galera/t/galera_ist_MDEV-28583.cnf
@@ -3,7 +3,6 @@
[mysqld.1]
# server-id=101
#wsrep-debug=1
-innodb_file_per_table
innodb_autoinc_lock_mode=2
wsrep_sst_method=rsync
#wsrep_sst_method=mariabackup
@@ -20,7 +19,6 @@ log_bin=binlog
[mysqld.2]
# server-id=102
#wsrep-debug=1
-innodb_file_per_table
innodb_autoinc_lock_mode=2
wsrep_sst_method=rsync
#wsrep_sst_method=mariabackup
diff --git a/mysql-test/suite/galera/t/galera_load_data.cnf b/mysql-test/suite/galera/t/galera_load_data.cnf
index 43cc352c020..3385fbac1f6 100644
--- a/mysql-test/suite/galera/t/galera_load_data.cnf
+++ b/mysql-test/suite/galera/t/galera_load_data.cnf
@@ -2,7 +2,6 @@
[mysqld.1]
secure-file-priv = ""
-innodb_file_per_table = ON
innodb_stats_persistent=ON
innodb_stats_auto_recalc=ON
innodb_stats_persistent_sample_pages=20
@@ -11,7 +10,6 @@ skip-innodb-read-only-compressed
[mysqld.2]
secure-file-priv = ""
-innodb_file_per_table = ON
innodb_stats_persistent=ON
innodb_stats_auto_recalc=ON
innodb_stats_persistent_sample_pages=20
diff --git a/mysql-test/suite/galera/t/galera_sst_encrypted.cnf b/mysql-test/suite/galera/t/galera_sst_encrypted.cnf
index e10c3e9f215..596bb95ac6a 100644
--- a/mysql-test/suite/galera/t/galera_sst_encrypted.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_encrypted.cnf
@@ -7,7 +7,6 @@ loose-file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys.txt
loose-file-key-management-encryption-algorithm=aes_cbc
wsrep-debug=1
innodb_encryption_threads = 4
-innodb_file_per_table=1
wsrep_sst_method=rsync
[mysqld.1]
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
index d3fff4fcb0b..857a4101406 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
@@ -4,7 +4,6 @@
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
wsrep_debug=1
-innodb-file-per-table=ON
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
diff --git a/mysql-test/suite/gcol/inc/gcol_keys.inc b/mysql-test/suite/gcol/inc/gcol_keys.inc
index e5ac0afd92a..9996d23e42e 100644
--- a/mysql-test/suite/gcol/inc/gcol_keys.inc
+++ b/mysql-test/suite/gcol/inc/gcol_keys.inc
@@ -14,6 +14,8 @@
# Change: #
################################################################################
+--source include/have_sequence.inc
+
if (!$support_virtual_index) {
let $skip_spatial_index_check=1;
let $skip_foreign_key_check=1;
@@ -197,12 +199,16 @@ PRIMARY KEY (pk),
KEY (col_time_key),
KEY (col_datetime_key));
+--disable_warnings
INSERT INTO c ( col_time_nokey,col_datetime_nokey,col_varchar_nokey) values
('14:03:03.042673','2001-11-28 00:50:27.051028', 'c'),
('01:46:09.016386','2007-10-09 19:53:04.008332', NULL),
('16:21:18.052408','2001-11-08 21:02:12.009395', 'x'),
('18:56:33.027423','2003-04-01 00:00:00', 'i');
+insert into c (col_time_nokey,col_datetime_nokey,col_varchar_nokey) select '10:10:10', '2021-12-24 01:50:27', 'z' from seq_1_to_10;
+--enable_warnings
+
--replace_column 9 x 10 x
EXPLAIN SELECT
outr.col_time_key AS x
diff --git a/mysql-test/suite/gcol/inc/gcol_select.inc b/mysql-test/suite/gcol/inc/gcol_select.inc
index 2386c55fdbc..4c030cb5646 100644
--- a/mysql-test/suite/gcol/inc/gcol_select.inc
+++ b/mysql-test/suite/gcol/inc/gcol_select.inc
@@ -545,11 +545,11 @@ CREATE TABLE cc (
);
INSERT INTO cc (col_int_nokey) VALUES (0),(1),(7),(0),(4),(5);
--replace_column 9 # 10 #
-EXPLAIN SELECT pk FROM cc WHERE col_int_key > 3;
-SELECT pk FROM cc WHERE col_int_key > 3;
+EXPLAIN SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3;
+SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3;
--replace_column 9 # 10 #
-EXPLAIN SELECT pk FROM cc WHERE col_int_key > 3 ORDER BY 1;
-SELECT pk FROM cc WHERE col_int_key > 3 ORDER BY 1;
+EXPLAIN SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3 ORDER BY 1;
+SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3 ORDER BY 1;
DROP TABLE cc;
--echo #
diff --git a/mysql-test/suite/gcol/r/gcol_bugfixes.result b/mysql-test/suite/gcol/r/gcol_bugfixes.result
index f124ebe611c..7b70f61df03 100644
--- a/mysql-test/suite/gcol/r/gcol_bugfixes.result
+++ b/mysql-test/suite/gcol/r/gcol_bugfixes.result
@@ -638,10 +638,10 @@ DEFAULT SUBSTRING_INDEX(USER(),'@',1)
);
EXPLAIN UPDATE gafld SET nuigafld = 0 WHERE nuigafld = 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE gafld ALL NULL NULL NULL NULL 1 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
EXPLAIN UPDATE gafld SET nuigafld = 0 WHERE nuigafld = 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE gafld ALL NULL NULL NULL NULL 1 Using where
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
DROP TABLE gafld;
# (duplicate) MDEV-17653 replace into generated columns is unstable
# Some columns are snipped from the MDEV test
diff --git a/mysql-test/suite/gcol/r/gcol_ins_upd_innodb.result b/mysql-test/suite/gcol/r/gcol_ins_upd_innodb.result
index 193ef064da8..bc3b5493dbd 100644
--- a/mysql-test/suite/gcol/r/gcol_ins_upd_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_ins_upd_innodb.result
@@ -497,7 +497,7 @@ WHERE OUTR1.pk = 1
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY OUTR1 const PRIMARY PRIMARY 4 const 1
1 PRIMARY INNR1 ALL NULL NULL NULL NULL 2 Using where; FirstMatch(OUTR1)
-1 PRIMARY OUTR2 index NULL PRIMARY 4 NULL 2 Using index
+1 PRIMARY OUTR2 ALL NULL NULL NULL NULL 2
DROP TABLE IF EXISTS b,bb,d;
#
# Bug#21216067 ASSERTION FAILED ROW_UPD_SEC_INDEX_ENTRY (INNOBASE/ROW/ROW0UPD.CC:2103)
diff --git a/mysql-test/suite/gcol/r/gcol_keys_innodb.result b/mysql-test/suite/gcol/r/gcol_keys_innodb.result
index 0228f9be842..4b1e5b48327 100644
--- a/mysql-test/suite/gcol/r/gcol_keys_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_keys_innodb.result
@@ -192,11 +192,7 @@ INSERT INTO c ( col_time_nokey,col_datetime_nokey,col_varchar_nokey) values
('01:46:09.016386','2007-10-09 19:53:04.008332', NULL),
('16:21:18.052408','2001-11-08 21:02:12.009395', 'x'),
('18:56:33.027423','2003-04-01 00:00:00', 'i');
-Warnings:
-Note 1265 Data truncated for column 'col_time_key' at row 1
-Note 1265 Data truncated for column 'col_time_key' at row 2
-Note 1265 Data truncated for column 'col_time_key' at row 3
-Note 1265 Data truncated for column 'col_time_key' at row 4
+insert into c (col_time_nokey,col_datetime_nokey,col_varchar_nokey) select '10:10:10', '2021-12-24 01:50:27', 'z' from seq_1_to_10;
EXPLAIN SELECT
outr.col_time_key AS x
FROM c as outr
diff --git a/mysql-test/suite/gcol/r/gcol_keys_myisam.result b/mysql-test/suite/gcol/r/gcol_keys_myisam.result
index 523ff3a3764..3c9093cf9cd 100644
--- a/mysql-test/suite/gcol/r/gcol_keys_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_keys_myisam.result
@@ -192,11 +192,7 @@ INSERT INTO c ( col_time_nokey,col_datetime_nokey,col_varchar_nokey) values
('01:46:09.016386','2007-10-09 19:53:04.008332', NULL),
('16:21:18.052408','2001-11-08 21:02:12.009395', 'x'),
('18:56:33.027423','2003-04-01 00:00:00', 'i');
-Warnings:
-Note 1265 Data truncated for column 'col_time_key' at row 1
-Note 1265 Data truncated for column 'col_time_key' at row 2
-Note 1265 Data truncated for column 'col_time_key' at row 3
-Note 1265 Data truncated for column 'col_time_key' at row 4
+insert into c (col_time_nokey,col_datetime_nokey,col_varchar_nokey) select '10:10:10', '2021-12-24 01:50:27', 'z' from seq_1_to_10;
EXPLAIN SELECT
outr.col_time_key AS x
FROM c as outr
diff --git a/mysql-test/suite/gcol/r/gcol_select_innodb.result b/mysql-test/suite/gcol/r/gcol_select_innodb.result
index 72d1e9f320c..b57aceac9e0 100644
--- a/mysql-test/suite/gcol/r/gcol_select_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_select_innodb.result
@@ -146,7 +146,7 @@ count(distinct c)
3
explain select count(distinct c) from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL c 5 NULL 6 Using index for group-by
+1 SIMPLE t1 range NULL c 5 NULL 5 Using index for group-by
###
### filesort & range-based utils
###
@@ -699,11 +699,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
@@ -754,11 +753,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
@@ -810,11 +808,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
@@ -874,11 +871,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1, t3.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
diff --git a/mysql-test/suite/gcol/r/gcol_select_myisam.result b/mysql-test/suite/gcol/r/gcol_select_myisam.result
index 0d18976f3bf..070808fec4f 100644
--- a/mysql-test/suite/gcol/r/gcol_select_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_select_myisam.result
@@ -146,7 +146,7 @@ count(distinct c)
3
explain select count(distinct c) from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL c 5 NULL 6 Using index for group-by
+1 SIMPLE t1 range NULL c 5 NULL 5 Using index for group-by
###
### filesort & range-based utils
###
@@ -792,18 +792,18 @@ PRIMARY KEY (pk),
KEY (col_int_key)
);
INSERT INTO cc (col_int_nokey) VALUES (0),(1),(7),(0),(4),(5);
-EXPLAIN SELECT pk FROM cc WHERE col_int_key > 3;
+EXPLAIN SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE cc range col_int_key col_int_key 5 NULL # #
-SELECT pk FROM cc WHERE col_int_key > 3;
+SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3;
pk
5
6
3
-EXPLAIN SELECT pk FROM cc WHERE col_int_key > 3 ORDER BY 1;
+EXPLAIN SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3 ORDER BY 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE cc range col_int_key col_int_key 5 NULL # #
-SELECT pk FROM cc WHERE col_int_key > 3 ORDER BY 1;
+SELECT pk FROM cc force index(col_int_key) WHERE col_int_key > 3 ORDER BY 1;
pk
3
5
@@ -1325,11 +1325,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY,v_idx PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY,v_idx PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
@@ -1381,11 +1380,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY,v_idx,v_idx2 PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY,v_idx,v_idx2 PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
@@ -1439,11 +1437,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY,v_idx2 PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY,v_idx2 PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
@@ -1506,11 +1503,10 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
-1 PRIMARY t3 eq_ref PRIMARY,v_idx PRIMARY 4 test.t4.i1 1 Using where
+1 PRIMARY t4 ALL NULL NULL NULL NULL 3 Using where; Start temporary
+1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 PRIMARY t3 eq_ref PRIMARY,v_idx PRIMARY 4 test.t4.i1 1 Using where; End temporary
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
-2 MATERIALIZED t4 ALL NULL NULL NULL NULL 3 Using where
SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1, t3.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
WHERE ( t3.pk IN
diff --git a/mysql-test/suite/handler/aria.result b/mysql-test/suite/handler/aria.result
index b0a4a173289..abed2cfccc7 100644
--- a/mysql-test/suite/handler/aria.result
+++ b/mysql-test/suite/handler/aria.result
@@ -1540,8 +1540,6 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/handler/heap.result b/mysql-test/suite/handler/heap.result
index e66bccb1341..75355ba029c 100644
--- a/mysql-test/suite/handler/heap.result
+++ b/mysql-test/suite/handler/heap.result
@@ -1539,8 +1539,6 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/handler/innodb.result b/mysql-test/suite/handler/innodb.result
index 5d44642db01..5305a80ab9e 100644
--- a/mysql-test/suite/handler/innodb.result
+++ b/mysql-test/suite/handler/innodb.result
@@ -1544,8 +1544,6 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/handler/myisam.result b/mysql-test/suite/handler/myisam.result
index cc817ccd889..94c3791d837 100644
--- a/mysql-test/suite/handler/myisam.result
+++ b/mysql-test/suite/handler/myisam.result
@@ -1540,8 +1540,6 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/heap/heap_btree.result b/mysql-test/suite/heap/heap_btree.result
index 526c76a52e8..3eb618a0680 100644
--- a/mysql-test/suite/heap/heap_btree.result
+++ b/mysql-test/suite/heap/heap_btree.result
@@ -70,6 +70,10 @@ alter table t1 engine=myisam;
explain select * from t1 where a in (869751,736494,226312,802616);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range uniq_id uniq_id 4 NULL 4 Using where; Using index
+insert into t1 values (1),(2),(3),(4),(5),(6);
+explain select * from t1 where a in (869751,736494,226312,802616);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range uniq_id uniq_id 4 NULL 4 Using where; Using index
drop table t1;
create table t1 (x int not null, y int not null, key x using BTREE (x,y), unique y using BTREE (y))
engine=heap;
diff --git a/mysql-test/suite/heap/heap_btree.test b/mysql-test/suite/heap/heap_btree.test
index d3fbe4cc0d2..e8f7c02c6f3 100644
--- a/mysql-test/suite/heap/heap_btree.test
+++ b/mysql-test/suite/heap/heap_btree.test
@@ -48,6 +48,8 @@ select * from t1 where a in (869751,736494,226312,802616);
explain select * from t1 where a in (869751,736494,226312,802616);
alter table t1 engine=myisam;
explain select * from t1 where a in (869751,736494,226312,802616);
+insert into t1 values (1),(2),(3),(4),(5),(6);
+explain select * from t1 where a in (869751,736494,226312,802616);
drop table t1;
create table t1 (x int not null, y int not null, key x using BTREE (x,y), unique y using BTREE (y))
diff --git a/mysql-test/suite/innodb/include/innodb_bulk_create_index.inc b/mysql-test/suite/innodb/include/innodb_bulk_create_index.inc
index 3c10517933f..438b7b17d51 100644
--- a/mysql-test/suite/innodb/include/innodb_bulk_create_index.inc
+++ b/mysql-test/suite/innodb/include/innodb_bulk_create_index.inc
@@ -35,8 +35,6 @@ if ($row_format != 'COMPRESSED')
if ($row_format == 'COMPRESSED')
{
- SET GLOBAL innodb_file_per_table=1;
-
eval CREATE TABLE t1(
class INT,
id INT,
@@ -176,10 +174,4 @@ SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
DROP TABLE t1;
-# Restore global variables
-if ($row_format == 'COMPRESSED')
-{
- SET GLOBAL innodb_file_per_table=default;
-}
-
DROP PROCEDURE populate_t1;
diff --git a/mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc b/mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc
index 85466e5e4ae..dbdadd0b7f9 100644
--- a/mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc
+++ b/mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc
@@ -38,8 +38,6 @@ if ($row_format != 'COMPRESSED')
if ($row_format == 'COMPRESSED')
{
- SET GLOBAL innodb_file_per_table=1;
-
eval CREATE TABLE t1(
class INT,
id INT,
@@ -69,8 +67,6 @@ if ($row_format != 'COMPRESSED') {
}
if ($row_format == 'COMPRESSED') {
- SET GLOBAL innodb_file_per_table=1;
-
eval CREATE TABLE t1(
a INT PRIMARY KEY,
b TEXT,
@@ -124,8 +120,6 @@ if ($row_format != 'COMPRESSED')
if ($row_format == 'COMPRESSED')
{
- SET GLOBAL innodb_file_per_table=1;
-
eval CREATE TABLE t1(
class INT,
id INT,
@@ -174,8 +168,6 @@ if ($row_format != 'COMPRESSED') {
}
if ($row_format == 'COMPRESSED') {
- SET GLOBAL innodb_file_per_table=1;
-
eval CREATE TABLE t1(
a INT PRIMARY KEY,
b TEXT,
diff --git a/mysql-test/suite/innodb/include/innodb_merge_threshold_delete.inc b/mysql-test/suite/innodb/include/innodb_merge_threshold_delete.inc
index 8c60cd6e230..68f34978f23 100644
--- a/mysql-test/suite/innodb/include/innodb_merge_threshold_delete.inc
+++ b/mysql-test/suite/innodb/include/innodb_merge_threshold_delete.inc
@@ -1,6 +1,6 @@
#
# Test to cause merge of the pages (by deleting)
-# test/tab1 should be created already with innodb_file_per_table=ON
+# test/tab1 should be created already
# The definition is intended to be based on
# "create table tab1 (a bigint primary key, b varchar(2048)) engine=InnoDB;"
#
diff --git a/mysql-test/suite/innodb/include/innodb_merge_threshold_secondary.inc b/mysql-test/suite/innodb/include/innodb_merge_threshold_secondary.inc
index 8e821365e3f..d49272c4087 100644
--- a/mysql-test/suite/innodb/include/innodb_merge_threshold_secondary.inc
+++ b/mysql-test/suite/innodb/include/innodb_merge_threshold_secondary.inc
@@ -1,6 +1,6 @@
#
# Test to cause merge of the pages (at secondary index by deleting)
-# test/tab1 should be created already with innodb_file_per_table=ON
+# test/tab1 should be created already
# The definition is intended to be based on
# "create table tab1 (a bigint primary key, b blob) engine=InnoDB row_format=dynamic;"
# "create index index1 on tab1(b(750));"
diff --git a/mysql-test/suite/innodb/include/innodb_merge_threshold_update.inc b/mysql-test/suite/innodb/include/innodb_merge_threshold_update.inc
index 61e961ac6ae..ae720e0488d 100644
--- a/mysql-test/suite/innodb/include/innodb_merge_threshold_update.inc
+++ b/mysql-test/suite/innodb/include/innodb_merge_threshold_update.inc
@@ -1,6 +1,6 @@
#
# Test to cause merge of the pages (by updating to smaller)
-# test/tab1 should be created already with innodb_file_per_table=ON
+# test/tab1 should be created already
# The definition is intended to be based on
# "create table tab1 (a bigint primary key, b varchar(2048)) engine=InnoDB;"
#
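Not part of the patch: a minimal sketch of the setup that the three merge-threshold includes above assume, per their header comments. The table definition is taken from those comments; the row count, padding length and delete pattern are hypothetical, only illustrating how under-filled adjacent pages trigger the merges these includes measure.
# hypothetical setup for the innodb_merge_threshold_*.inc callers (illustrative only)
create table tab1 (a bigint primary key, b varchar(2048)) engine=InnoDB;
# fill enough rows that the clustered index spans several leaf pages
insert into tab1 select seq, repeat('x', 2048) from seq_1_to_1000;
# deleting (or, in the update variant, shrinking) most rows leaves adjacent
# pages under-filled, which is what causes InnoDB to merge them
delete from tab1 where a % 10 <> 0;
drop table tab1;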
diff --git a/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff b/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff
index a176a9af29e..79b3d8854fa 100644
--- a/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff
+++ b/mysql-test/suite/innodb/r/alter_algorithm,INPLACE.rdiff
@@ -1,5 +1,5 @@
---- alter_algorithm.result 2020-04-30 21:39:48.923115511 +0530
-+++ alter_algorithm.reject 2020-04-30 21:45:04.131642093 +0530
+--- alter_algorithm.result
++++ alter_algorithm.reject
@@ -7,43 +7,43 @@
INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1);
SELECT @@alter_algorithm;
diff --git a/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff b/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff
index 414b7dc8b0c..fbcb5ca8704 100644
--- a/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff
+++ b/mysql-test/suite/innodb/r/alter_algorithm,INSTANT.rdiff
@@ -1,5 +1,5 @@
---- alter_algorithm.result 2020-04-30 21:39:48.923115511 +0530
-+++ alter_algorithm.reject 2020-04-30 21:47:08.245465018 +0530
+--- alter_algorithm.result
++++ alter_algorithm.reject
@@ -7,43 +7,35 @@
INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1);
SELECT @@alter_algorithm;
diff --git a/mysql-test/suite/innodb/r/alter_algorithm,NOCOPY.rdiff b/mysql-test/suite/innodb/r/alter_algorithm,NOCOPY.rdiff
index 2aa8c72265a..44e9f63a5f4 100644
--- a/mysql-test/suite/innodb/r/alter_algorithm,NOCOPY.rdiff
+++ b/mysql-test/suite/innodb/r/alter_algorithm,NOCOPY.rdiff
@@ -1,5 +1,5 @@
---- alter_algorithm.result 2020-04-30 21:39:48.923115511 +0530
-+++ alter_algorithm.reject 2020-04-30 21:52:10.785967739 +0530
+--- alter_algorithm.result
++++ alter_algorithm.reject
@@ -7,43 +7,35 @@
INSERT INTO t1(f1, f2, f3) VALUES(1, 1, 1);
SELECT @@alter_algorithm;
diff --git a/mysql-test/suite/innodb/r/alter_kill.result b/mysql-test/suite/innodb/r/alter_kill.result
index 0283d1079e1..037b06fffbc 100644
--- a/mysql-test/suite/innodb/r/alter_kill.result
+++ b/mysql-test/suite/innodb/r/alter_kill.result
@@ -1,7 +1,6 @@
#
# Bug#16720368 INNODB CRASHES ON BROKEN #SQL*.IBD FILE AT STARTUP
#
-SET GLOBAL innodb_file_per_table=1;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
CREATE TABLE bug16720368_1 (a INT PRIMARY KEY) ENGINE=InnoDB;
connect con1,localhost,root;
@@ -30,7 +29,6 @@ DROP TABLE bug16720368, bug16720368_1;
# Bug#16735660 ASSERT TABLE2 == NULL, ROLLBACK OF RESURRECTED TXNS,
# DICT_TABLE_ADD_TO_CACHE
#
-SET GLOBAL innodb_file_per_table=1;
CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
BEGIN;
INSERT INTO t1 VALUES(42);
diff --git a/mysql-test/suite/innodb/r/alter_missing_tablespace.result b/mysql-test/suite/innodb/r/alter_missing_tablespace.result
index 3d071584256..65b01d89d91 100644
--- a/mysql-test/suite/innodb/r/alter_missing_tablespace.result
+++ b/mysql-test/suite/innodb/r/alter_missing_tablespace.result
@@ -2,7 +2,6 @@
# Bug#13955083 ALLOW IN-PLACE DDL OPERATIONS ON MISSING
# OR DISCARDED TABLESPACES
#
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t(a SERIAL)ENGINE=InnoDB;
CREATE TABLE `x..d` (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t1(a SERIAL)ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/r/alter_rename_existing.result b/mysql-test/suite/innodb/r/alter_rename_existing.result
index 1fdc2011a24..e4fee341964 100644
--- a/mysql-test/suite/innodb/r/alter_rename_existing.result
+++ b/mysql-test/suite/innodb/r/alter_rename_existing.result
@@ -36,6 +36,8 @@ ERROR HY000: Error on rename of 'OLD_FILE_NAME' to 'NEW_FILE_NAME' (errno: 184 "
# Create another t1, but in the system tablespace.
#
SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1 (a SERIAL, b CHAR(20)) ENGINE=InnoDB;
INSERT INTO t1(b) VALUES('one'), ('two'), ('three');
SHOW CREATE TABLE t1;
@@ -58,6 +60,8 @@ ALTER TABLE t1 ADD COLUMN d INT, ALGORITHM=COPY;
# while a blocking t1.ibd file exists.
#
SET GLOBAL innodb_file_per_table=ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t1 FORCE, ALGORITHM=INPLACE;
ERROR HY000: Tablespace for table 'test/#sql-ib' exists. Please DISCARD the tablespace before IMPORT
ALTER TABLE t1 FORCE, ALGORITHM=COPY;
@@ -93,3 +97,5 @@ a b
2 two
3 three
DROP TABLE t1;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff
index 44446602b9f..b19abb449ad 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,32k.rdiff
@@ -1,5 +1,5 @@
---- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
-+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:29:25.129637040 +0530
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
@@ -3,18 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff
index ef55ad971fe..a1011a1cb90 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,4k.rdiff
@@ -1,5 +1,5 @@
---- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
-+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:39.288769153 +0530
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
@@ -3,18 +3,18 @@
# SPACE IN 5.7 THAN IN 5.6
#
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff
index bcdcea31160..010eb8a284d 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,64k.rdiff
@@ -1,5 +1,5 @@
---- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
-+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:30:28.957174270 +0530
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
@@ -3,18 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
diff --git a/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff b/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff
index 7b699ef4cea..0e1e64724b0 100644
--- a/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff
+++ b/mysql-test/suite/innodb/r/check_ibd_filesize,8k.rdiff
@@ -1,5 +1,5 @@
---- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
-+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:03.516962339 +0530
+--- mysql-test/suite/innodb/r/check_ibd_filesize.result
++++ mysql-test/suite/innodb/r/check_ibd_filesize.reject
@@ -3,18 +3,18 @@
# SPACE IN 5.7 THAN IN 5.6
#
diff --git a/mysql-test/suite/innodb/r/defrag_mdl-9155.result b/mysql-test/suite/innodb/r/defrag_mdl-9155.result
index 18b71f42381..b1b70de3373 100644
--- a/mysql-test/suite/innodb/r/defrag_mdl-9155.result
+++ b/mysql-test/suite/innodb/r/defrag_mdl-9155.result
@@ -1,4 +1,6 @@
set global innodb_defragment=1;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
create table t1 (a int not null primary key auto_increment, b varchar(256), key second(a, b)) engine=innodb;
insert t1 select null, repeat('a', 256) from seq_1_to_100;
select count(*) from t1;
@@ -16,3 +18,5 @@ test.t1 optimize status OK
connection con1;
drop table t1;
set global innodb_defragment=default;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/innodb/r/dropdb_cs.result b/mysql-test/suite/innodb/r/dropdb_cs.result
index 59f02c74a04..3837bf9e3f5 100644
--- a/mysql-test/suite/innodb/r/dropdb_cs.result
+++ b/mysql-test/suite/innodb/r/dropdb_cs.result
@@ -3,6 +3,8 @@
#
SET @save_fpt=@@GLOBAL.innodb_file_per_table;
SET GLOBAL innodb_file_per_table=0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE DATABASE Db;
CREATE TABLE Db.t1 (c1 INT KEY) ENGINE=InnoDB;
CREATE DATABASE DB;
@@ -14,3 +16,5 @@ DROP DATABASE Db;
SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'D%';
TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE
SET GLOBAL innodb_file_per_table=@save_fpt;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/innodb/r/gap_locks.result b/mysql-test/suite/innodb/r/gap_locks.result
index cd60b1fab22..d98b71982c8 100644
--- a/mysql-test/suite/innodb/r/gap_locks.result
+++ b/mysql-test/suite/innodb/r/gap_locks.result
@@ -1,6 +1,6 @@
CREATE TABLE t1(a INT PRIMARY KEY, b VARCHAR(40), c INT, INDEX(b,c))
ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1,'1',1),(2,'2',1);
+INSERT INTO t1 VALUES (1,'1',1),(2,'2',1),(3,'3',1);
SET @save_locks= @@GLOBAL.innodb_status_output_locks;
SET GLOBAL INNODB_STATUS_OUTPUT_LOCKS = 'ON';
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
diff --git a/mysql-test/suite/innodb/r/ibuf_delete.result b/mysql-test/suite/innodb/r/ibuf_delete.result
deleted file mode 100644
index 7423d3cfa3e..00000000000
--- a/mysql-test/suite/innodb/r/ibuf_delete.result
+++ /dev/null
@@ -1,53 +0,0 @@
-SET @buffering= @@innodb_change_buffering;
-SET GLOBAL innodb_change_buffering= deletes;
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-SET @flush= @@innodb_flush_log_at_trx_commit;
-SET GLOBAL innodb_flush_log_at_trx_commit= 0;
-CREATE TABLE t1 (
-a varchar(1024),
-b varchar(1024),
-c varchar(1024),
-d varchar(1024),
-e varchar(1024),
-f varchar(1024),
-g varchar(1024),
-h varchar(1024),
-key (a),
-key (b),
-key (c),
-key (d)
-) ENGINE=InnoDB;
-INSERT INTO t1
-SELECT REPEAT('x',10), REPEAT('x',13), REPEAT('x',427), REPEAT('x',244),
-REPEAT('x',9), REPEAT('x',112), REPEAT('x',814), REPEAT('x',633)
-FROM seq_1_to_1024;
-CREATE TEMPORARY TABLE t2 (
-a varchar(1024),
-b varchar(1024),
-c varchar(1024),
-d varchar(1024),
-e varchar(1024),
-f varchar(1024),
-g varchar(1024),
-h varchar(1024),
-i varchar(1024),
-j varchar(1024),
-k varchar(1024),
-l varchar(1024),
-m varchar(1024),
-key (a),
-key (b),
-key (c),
-key (d),
-key (e),
-key (f)
-) ENGINE=InnoDB;
-SET @x=REPEAT('x',512);
-INSERT INTO t2 SELECT @x, @x, @x, @x, @x, @x, @x, @x, @x, @x, @x, @x, @x
-FROM seq_1_to_768;
-DROP TABLE t1, t2;
-SET GLOBAL innodb_change_buffering= @buffering;
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-SET GLOBAL innodb_flush_log_at_trx_commit= @flush;
diff --git a/mysql-test/suite/innodb/r/ibuf_not_empty.result b/mysql-test/suite/innodb/r/ibuf_not_empty.result
deleted file mode 100644
index 1dc7f0324d3..00000000000
--- a/mysql-test/suite/innodb/r/ibuf_not_empty.result
+++ /dev/null
@@ -1,20 +0,0 @@
-CREATE TABLE t1(
-a INT AUTO_INCREMENT PRIMARY KEY,
-b CHAR(1),
-c INT,
-INDEX(b))
-ENGINE=InnoDB STATS_PERSISTENT=0;
-SET GLOBAL innodb_change_buffering_debug = 1;
-SET GLOBAL innodb_change_buffering=all;
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-INSERT INTO t1 SELECT 0,'x',1 FROM seq_1_to_1024;
-# restart: --innodb-force-recovery=6 --innodb-change-buffer-dump
-check table t1;
-Table Op Msg_type Msg_text
-test.t1 check Warning InnoDB: Index 'b' contains 990 entries, should be 1024.
-test.t1 check error Corrupt
-# restart: --innodb-force_recovery=0
-SET GLOBAL innodb_fast_shutdown=0;
-# restart: --innodb-force_recovery=0
-DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/index_tree_operation.result b/mysql-test/suite/innodb/r/index_tree_operation.result
index 29660962e0c..92565333a93 100644
--- a/mysql-test/suite/innodb/r/index_tree_operation.result
+++ b/mysql-test/suite/innodb/r/index_tree_operation.result
@@ -2,7 +2,6 @@
# Bug#15923864 (Bug#67718):
# INNODB DRASTICALLY UNDER-FILLS PAGES IN CERTAIN CONDITIONS
#
-SET GLOBAL innodb_file_per_table=ON;
CREATE TABLE t1 (a BIGINT PRIMARY KEY, b VARCHAR(4096)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (0, REPEAT('a', 4096));
INSERT INTO t1 VALUES (1000, REPEAT('a', 4096));
diff --git a/mysql-test/suite/innodb/r/innodb-fkcheck.result b/mysql-test/suite/innodb/r/innodb-fkcheck.result
index f86ba50597f..357d8aa11c1 100644
--- a/mysql-test/suite/innodb/r/innodb-fkcheck.result
+++ b/mysql-test/suite/innodb/r/innodb-fkcheck.result
@@ -1,6 +1,3 @@
-set global innodb_file_per_table = 1;
-drop table if exists b;
-drop database if exists bug_fk;
create database bug_fk;
use bug_fk;
CREATE TABLE b (
@@ -93,5 +90,5 @@ show warnings;
Level Code Message
b.frm
b.ibd
-drop table if exists b;
-drop database if exists bug_fk;
+drop table b;
+drop database bug_fk;
diff --git a/mysql-test/suite/innodb/r/innodb-index-online.result b/mysql-test/suite/innodb/r/innodb-index-online.result
index 7138c5d7a18..150f950d591 100644
--- a/mysql-test/suite/innodb/r/innodb-index-online.result
+++ b/mysql-test/suite/innodb/r/innodb-index-online.result
@@ -1,6 +1,4 @@
call mtr.add_suppression("InnoDB: Warning: Small buffer pool size");
-SET @global_innodb_file_per_table_orig = @@global.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table = on;
CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 INT, c3 TEXT)
ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1 VALUES (1,1,''), (2,2,''), (3,3,''), (4,4,''), (5,5,'');
@@ -206,7 +204,7 @@ CREATE INDEX c2d ON t1(c2);
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
t1 0 PRIMARY 1 c1 A 80 NULL NULL BTREE NO
-t1 1 c2d 1 c2 A 10 NULL NULL YES BTREE NO
+t1 1 c2d 1 c2 A 5 NULL NULL YES BTREE NO
EXPLAIN SELECT COUNT(*) FROM t1 WHERE c2 > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c2d c2d 5 NULL 32 Using where; Using index
@@ -540,6 +538,5 @@ Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
SET DEBUG_SYNC = 'RESET';
-SET GLOBAL innodb_file_per_table = @global_innodb_file_per_table_orig;
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result
index a76837b91a2..1ebed058aaf 100644
--- a/mysql-test/suite/innodb/r/innodb-index.result
+++ b/mysql-test/suite/innodb/r/innodb-index.result
@@ -853,10 +853,10 @@ test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
explain select * from t1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index
+1 SIMPLE t1 index NULL PRIMARY 4 NULL 2
drop table t1;
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
SET FOREIGN_KEY_CHECKS=0;
diff --git a/mysql-test/suite/innodb/r/innodb-isolation.result b/mysql-test/suite/innodb/r/innodb-isolation.result
index b6e512cc6de..8ff2c7f12e9 100644
--- a/mysql-test/suite/innodb/r/innodb-isolation.result
+++ b/mysql-test/suite/innodb/r/innodb-isolation.result
@@ -971,7 +971,7 @@ id select_type table type possible_keys key key_len ref rows Extra
2 SUBQUERY t1 index NULL k2 5 NULL # Using index
EXPLAIN SELECT COUNT(*) FROM t1 WHERE c1 > (SELECT AVG(c1) FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL # Using where; Using index
+1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL # Using where
2 SUBQUERY t1 index NULL k2 5 NULL # Using index
#
# Make all indexes in t2 obsolete to the active repeatable read transaction
diff --git a/mysql-test/suite/innodb/r/innodb-system-table-view.result b/mysql-test/suite/innodb/r/innodb-system-table-view.result
index 580ed08e424..dfe916bc6cd 100644
--- a/mysql-test/suite/innodb/r/innodb-system-table-view.result
+++ b/mysql-test/suite/innodb/r/innodb-system-table-view.result
@@ -10,7 +10,7 @@ TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE
11 SYS_FOREIGN 0 7 0 Redundant 0 System
12 SYS_FOREIGN_COLS 0 7 0 Redundant 0 System
13 SYS_VIRTUAL 0 6 0 Redundant 0 System
-16 mysql/transaction_registry 33 8 3 Dynamic 0 Single
+16 mysql/transaction_registry 33 8 6 Dynamic 0 Single
SELECT table_id,pos,mtype,prtype,len,name
FROM INFORMATION_SCHEMA.INNODB_SYS_COLUMNS
WHERE table_id NOT IN (@table_stats_id, @index_stats_id)
@@ -77,6 +77,9 @@ test/t_dynamic test/t_dynamic 33 5 Dynamic 0
test/t_redundant test/t_redundant 0 5 Redundant 0
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t_redundant DEFAULT DEFAULT MYSQLD_DATADIR/test/t_redundant.ibd
test/t_compact DEFAULT DEFAULT MYSQLD_DATADIR/test/t_compact.ibd
test/t_compressed DEFAULT 2048 MYSQLD_DATADIR/test/t_compressed.ibd
diff --git a/mysql-test/suite/innodb/r/innodb-table-online.result b/mysql-test/suite/innodb/r/innodb-table-online.result
index 5a8b3d24d75..8ac0d4d41fa 100644
--- a/mysql-test/suite/innodb/r/innodb-table-online.result
+++ b/mysql-test/suite/innodb/r/innodb-table-online.result
@@ -1,8 +1,6 @@
call mtr.add_suppression("InnoDB: Warning: Small buffer pool size");
call mtr.add_suppression("InnoDB: Error: table 'test/t1'");
call mtr.add_suppression("MariaDB is trying to open a table handle but the .ibd file for");
-SET @global_innodb_file_per_table_orig = @@global.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table = on;
CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 INT NOT NULL, c3 CHAR(255) NOT NULL)
ENGINE = InnoDB;
INSERT INTO t1 VALUES (1,1,''), (2,2,''), (3,3,''), (4,4,''), (5,5,'');
@@ -472,6 +470,5 @@ DROP TABLE t1;
connection default;
SET DEBUG_SYNC=RESET;
disconnect con1;
-SET GLOBAL innodb_file_per_table = @global_innodb_file_per_table_orig;
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522-debug.result b/mysql-test/suite/innodb/r/innodb-wl5522-debug.result
index e7af2d9469e..1c3b10c0597 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522-debug.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522-debug.result
@@ -12,21 +12,18 @@ call mtr.add_suppression("InnoDB: Cannot save statistics for table `test`\\.`t1`
call mtr.add_suppression("InnoDB: Database page corruption on disk or a failed read of file '.*ibdata1' page");
call mtr.add_suppression("InnoDB: File '.*ibdata1' is corrupted");
FLUSH TABLES;
-SET GLOBAL innodb_file_per_table = 1;
CREATE TABLE t1 (c1 INT) ENGINE = InnoDB;
INSERT INTO t1 VALUES(1),(2),(3);
SET SESSION debug_dbug="+d,ib_discard_before_commit_crash";
ALTER TABLE t1 DISCARD TABLESPACE;
ERROR HY000: Lost connection to server during query
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table = 1;
CREATE TABLE t1 (c1 INT) ENGINE = InnoDB;
INSERT INTO t1 VALUES(1),(2),(3);
SET SESSION debug_dbug="+d,ib_discard_after_commit_crash";
ALTER TABLE t1 DISCARD TABLESPACE;
ERROR HY000: Lost connection to server during query
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table = 1;
CREATE TABLE t1 (c1 INT) ENGINE = Innodb;
INSERT INTO t1 VALUES (1), (2), (3), (4);
FLUSH TABLES t1 FOR EXPORT;
@@ -52,8 +49,6 @@ ERROR HY000: Lost connection to server during query
unlink: t1.ibd
unlink: t1.cfg
DROP TABLE t1;
-SET @file_per_table= @@innodb_file_per_table;
-SET GLOBAL innodb_file_per_table = 1;
CREATE TABLE t1 (c1 INT) ENGINE = Innodb;
ALTER TABLE t1 DISCARD TABLESPACE;
restore: t1 .ibd and .cfg files
@@ -459,11 +454,6 @@ ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Got error 42 'Tablespace not found' from ./test/t1.ibd
SET SESSION debug_dbug=@saved_debug_dbug;
restore: t1 .ibd and .cfg files
-SET SESSION debug_dbug="+d,ib_import_check_bitmap_failure";
-ALTER TABLE t1 IMPORT TABLESPACE;
-ERROR HY000: Index for table 't1' is corrupt; try to repair it
-SET SESSION debug_dbug=@saved_debug_dbug;
-restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_cluster_root_adjust_failure";
ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Index for table 't1' is corrupt; try to repair it
@@ -943,4 +933,3 @@ SET SESSION debug_dbug=@saved_debug_dbug;
DROP TABLE t1;
unlink: t1.ibd
unlink: t1.cfg
-SET GLOBAL INNODB_FILE_PER_TABLE=@file_per_table;
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522.result b/mysql-test/suite/innodb/r/innodb-wl5522.result
index 7bcc57c5bd5..819ca949c88 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522.result
@@ -120,7 +120,6 @@ ALTER TABLE t2 IMPORT TABLESPACE;
Warnings:
Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t2.cfg', will attempt to import without schema verification
DROP TABLE t2;
-SET GLOBAL innodb_file_per_table = 1;
SELECT @@innodb_file_per_table;
@@innodb_file_per_table
1
@@ -285,6 +284,8 @@ c1 c2
unlink: t1.cfg
DROP TABLE t1;
SET GLOBAL innodb_file_per_table = 0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1(
c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
c2 INT) ENGINE=InnoDB;
@@ -302,6 +303,8 @@ Warning 1809 Table `test`.`t1` in system tablespace
UNLOCK TABLES;
DROP TABLE t1;
SET GLOBAL innodb_file_per_table = 1;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1(
c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
c2 INT, INDEX idx(c2)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/r/innodb-wl5980-alter.result b/mysql-test/suite/innodb/r/innodb-wl5980-alter.result
index 4d6ac474da8..84f3d7811c2 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5980-alter.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5980-alter.result
@@ -2,9 +2,7 @@
# This is a copy of innodb-alter.test except using remote tablespaces
# and showing those files.
#
-SET @innodb_file_per_table_orig=@@GLOBAL.innodb_file_per_table;
SET default_storage_engine=InnoDB;
-SET GLOBAL innodb_file_per_table=ON;
SET NAMES utf8mb4;
CREATE TABLE t1 (
c1 INT PRIMARY KEY, c2 INT DEFAULT 1, ct TEXT,
@@ -1589,4 +1587,3 @@ DROP TABLE tt, t1o, sys_tables, sys_indexes, sys_foreign;
### files in MYSQL_DATA_DIR/test
db.opt
### files in MYSQL_TMP_DIR/alt_dir/test
-SET GLOBAL innodb_file_per_table = @innodb_file_per_table_orig;
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index 742a822baa0..a4c1115bfd4 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -931,7 +931,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL # Using filesort
explain select a from t1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 4 NULL # Using index
+1 SIMPLE t1 index NULL PRIMARY 4 NULL #
explain select b from t1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL b 4 NULL # Using index
@@ -1367,14 +1367,28 @@ PRIMARY KEY (`id`),
KEY `id_version` (`id_version`)
) ENGINE=InnoDB;
INSERT INTO t2 VALUES("3524", "1"),("3525", "1"),("1794", "4"),("102", "5"),("1822", "6"),("3382", "9");
+ANALYZE table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'description'
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+explain SELECT t2.id, t1.`label` FROM t2 INNER JOIN
+(SELECT t1.id_object as id_object FROM t1 WHERE t1.`label` LIKE '%test%') AS lbl
+ON (t2.id = lbl.id_object) INNER JOIN t1 ON (t2.id = t1.id_object);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL id_object NULL NULL NULL 6 Using where
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.id_object 1
+1 SIMPLE t1 ref id_object id_object 5 test.t1.id_object 1
SELECT t2.id, t1.`label` FROM t2 INNER JOIN
(SELECT t1.id_object as id_object FROM t1 WHERE t1.`label` LIKE '%test%') AS lbl
ON (t2.id = lbl.id_object) INNER JOIN t1 ON (t2.id = t1.id_object);
id label
-3382 Test
102 Le Pekin (Test)
1794 Test de resto
1822 Test 3
+3382 Test
3524 Societe Test
3525 Fournisseur Test
drop table t1,t2;
@@ -1676,7 +1690,7 @@ count(*)
0
explain select count(*) from t1 where x > -16;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL 2 Using where; Using index
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 2 Using where
select count(*) from t1 where x > -16;
count(*)
2
diff --git a/mysql-test/suite/innodb/r/innodb_bug14147491.result b/mysql-test/suite/innodb/r/innodb_bug14147491.result
index 533eaeafeb8..b33b7f0d587 100644
--- a/mysql-test/suite/innodb/r/innodb_bug14147491.result
+++ b/mysql-test/suite/innodb/r/innodb_bug14147491.result
@@ -2,7 +2,6 @@
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
SET GLOBAL innodb_fast_shutdown=0;
# Create and populate the table to be corrupted
-set global innodb_file_per_table=ON;
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
INSERT INTO t1 (b) VALUES ('corrupt me');
INSERT INTO t1 (b) VALUES ('corrupt me');
diff --git a/mysql-test/suite/innodb/r/innodb_bug30423.result b/mysql-test/suite/innodb/r/innodb_bug30423.result
index 786041370ef..8f66cedad4d 100644
--- a/mysql-test/suite/innodb/r/innodb_bug30423.result
+++ b/mysql-test/suite/innodb/r/innodb_bug30423.result
@@ -54,7 +54,7 @@ ON orgs.org_id=sa_opportunities.org_id
LEFT JOIN bug30243_2 contacts
ON orgs.org_id=contacts.org_id ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orgs index NULL org_id 4 NULL ROWS Using index
+1 SIMPLE orgs ALL NULL NULL NULL NULL ROWS
1 SIMPLE sa_opportunities ref org_id org_id 5 test.orgs.org_id ROWS Using index
1 SIMPLE contacts ref contacts$org_id contacts$org_id 5 test.orgs.org_id ROWS Using index
select @@innodb_stats_method;
@@ -83,7 +83,7 @@ ON orgs.org_id=sa_opportunities.org_id
LEFT JOIN bug30243_2 contacts
ON orgs.org_id=contacts.org_id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orgs index NULL org_id 4 NULL ROWS Using index
+1 SIMPLE orgs ALL NULL NULL NULL NULL ROWS
1 SIMPLE sa_opportunities ref org_id org_id 5 test.orgs.org_id ROWS Using index
1 SIMPLE contacts ref contacts$org_id contacts$org_id 5 test.orgs.org_id ROWS Using index
SELECT COUNT(*) FROM table_bug30423 WHERE org_id IS NULL;
diff --git a/mysql-test/suite/innodb/r/innodb_bug30919.result b/mysql-test/suite/innodb/r/innodb_bug30919.result
index 0062df3f470..42aa4ff302b 100644
--- a/mysql-test/suite/innodb/r/innodb_bug30919.result
+++ b/mysql-test/suite/innodb/r/innodb_bug30919.result
@@ -35,10 +35,6 @@ FROM test.part_tbl; -- debug to show the problem
SET del_count = del_count - 2;
END WHILE;
END|
-Warnings:
-Level Warning
-Code 1287
-Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL test.proc_part();
internal_count del_count
999 1000
diff --git a/mysql-test/suite/innodb/r/innodb_bug51920.result b/mysql-test/suite/innodb/r/innodb_bug51920.result
index 9bc35174979..ddb9e29fab2 100644
--- a/mysql-test/suite/innodb/r/innodb_bug51920.result
+++ b/mysql-test/suite/innodb/r/innodb_bug51920.result
@@ -11,8 +11,6 @@ connection default;
SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE INFO="UPDATE bug51920 SET i=2"
INTO @thread_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
KILL @thread_id;
connection con1;
Got one of the listed errors
diff --git a/mysql-test/suite/innodb/r/innodb_bug56947.result b/mysql-test/suite/innodb/r/innodb_bug56947.result
index aa922776f7b..a4ed3bfcfaa 100644
--- a/mysql-test/suite/innodb/r/innodb_bug56947.result
+++ b/mysql-test/suite/innodb/r/innodb_bug56947.result
@@ -1,4 +1,7 @@
-SET GLOBAL innodb_file_per_table=0;
+SET @save_fpt=@@GLOBAL.innodb_file_per_table;
+SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
create table bug56947(a int not null) engine = innodb;
SET @saved_dbug = @@SESSION.debug_dbug;
SET DEBUG_DBUG='+d,ib_rebuild_cannot_rename';
@@ -8,5 +11,7 @@ check table bug56947;
Table Op Msg_type Msg_text
test.bug56947 check status OK
drop table bug56947;
-SET @@global.innodb_file_per_table=DEFAULT;
+SET GLOBAL innodb_file_per_table=@save_fpt;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SET debug_dbug= @saved_dbug;
diff --git a/mysql-test/suite/innodb/r/innodb_bug57252.result b/mysql-test/suite/innodb/r/innodb_bug57252.result
index 66183c2c42d..18e47900e41 100644
--- a/mysql-test/suite/innodb/r/innodb_bug57252.result
+++ b/mysql-test/suite/innodb/r/innodb_bug57252.result
@@ -1,5 +1,5 @@
cardinality
-2
+1
Table Op Msg_type Msg_text
test.bug57252 analyze status Engine-independent statistics collected
test.bug57252 analyze status OK
diff --git a/mysql-test/suite/innodb/r/innodb_bug59733.result b/mysql-test/suite/innodb/r/innodb_bug59733.result
deleted file mode 100644
index c962cdfd677..00000000000
--- a/mysql-test/suite/innodb/r/innodb_bug59733.result
+++ /dev/null
@@ -1,18 +0,0 @@
-CREATE TABLE bug59733(a INT AUTO_INCREMENT PRIMARY KEY,b CHAR(1))ENGINE=InnoDB;
-INSERT INTO bug59733 VALUES(0,'x');
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-CREATE INDEX b ON bug59733 (b);
-DELETE FROM bug59733 WHERE (a%100)=0;
-DROP INDEX b ON bug59733;
-CREATE INDEX b ON bug59733 (b);
-DROP TABLE bug59733;
diff --git a/mysql-test/suite/innodb/r/innodb_bug68148.result b/mysql-test/suite/innodb/r/innodb_bug68148.result
index 25bf7f58eb9..1c79f881bef 100644
--- a/mysql-test/suite/innodb/r/innodb_bug68148.result
+++ b/mysql-test/suite/innodb/r/innodb_bug68148.result
@@ -1,4 +1,3 @@
-set global innodb_file_per_table=1;
CREATE TABLE ref_table1 (id int(11) NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB;
CREATE TABLE ref_table2 (id int(11) NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB;
CREATE TABLE `main` (
diff --git a/mysql-test/suite/innodb/r/innodb_bulk_create_index.result b/mysql-test/suite/innodb/r/innodb_bulk_create_index.result
index ec7ce044cb7..060e9008f3f 100644
--- a/mysql-test/suite/innodb/r/innodb_bulk_create_index.result
+++ b/mysql-test/suite/innodb/r/innodb_bulk_create_index.result
@@ -532,7 +532,6 @@ END|
SELECT @@innodb_fill_factor;
@@innodb_fill_factor
100
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
class INT,
id INT,
@@ -686,7 +685,6 @@ test.t1 check status OK
SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
CHAR_LENGTH(b)
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=default;
DROP PROCEDURE populate_t1;
SET GLOBAL innodb_fill_factor=10;
CREATE PROCEDURE populate_t1(load_even INT)
diff --git a/mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result b/mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result
index 295a9f1bed8..debb5d0f14a 100644
--- a/mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result
+++ b/mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result
@@ -407,7 +407,6 @@ SET i = i + 1;
END WHILE;
COMMIT;
END|
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
class INT,
id INT,
@@ -421,7 +420,6 @@ affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
RENAME TABLE t1 TO t0;
# Test Blob
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
a INT PRIMARY KEY,
b TEXT,
@@ -467,7 +465,6 @@ class id title
SELECT * FROM t1 WHERE title = 'a10010';
class id title
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
class INT,
id INT,
@@ -501,7 +498,6 @@ SELECT * FROM t1 WHERE title = 'a10010';
class id title
DROP TABLE t1;
# Test Blob
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
a INT PRIMARY KEY,
b TEXT,
diff --git a/mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result b/mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result
index b48207d4497..bdc40d1a070 100644
--- a/mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result
+++ b/mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result
@@ -72,8 +72,6 @@ test.t1 check status OK
SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
CHAR_LENGTH(b)
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=default;
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
class INT,
id INT,
@@ -135,5 +133,4 @@ test.t1 check status OK
SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
CHAR_LENGTH(b)
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=default;
DROP PROCEDURE populate_t1;
diff --git a/mysql-test/suite/innodb/r/innodb_defrag_concurrent.result b/mysql-test/suite/innodb/r/innodb_defrag_concurrent.result
index 07c96e76213..72d520a61b4 100644
--- a/mysql-test/suite/innodb/r/innodb_defrag_concurrent.result
+++ b/mysql-test/suite/innodb/r/innodb_defrag_concurrent.result
@@ -3,6 +3,8 @@ SET @accuracy= @@GLOBAL.innodb_defragment_stats_accuracy;
SET @sp= @@GLOBAL.innodb_stats_persistent;
SET GLOBAL innodb_stats_persistent = 0;
set global innodb_defragment_stats_accuracy = 80;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
b VARCHAR(256),
c INT,
@@ -18,6 +20,8 @@ connect con3,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connect con4,localhost,root,,test,$MASTER_MYPORT,$MASTER_MYSOCK;
connection default;
SET @@global.innodb_defragment_n_pages = 20;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
CREATE TEMPORARY TABLE tt (a INT, KEY(a)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
INSERT INTO tt SELECT 0 FROM seq_1_to_180;
INSERT INTO tt SELECT 5 FROM seq_1_to_160;
@@ -98,5 +102,9 @@ count(stat_value) > 0
1
drop table t1;
SET GLOBAL innodb_defragment_n_pages = @n_pages;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
SET GLOBAL innodb_defragment_stats_accuracy = @accuracy;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SET GLOBAL innodb_stats_persistent = @sp;
diff --git a/mysql-test/suite/innodb/r/innodb_defrag_stats.result b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
index c6fd7006f9e..1753ce36bed 100644
--- a/mysql-test/suite/innodb/r/innodb_defrag_stats.result
+++ b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
@@ -1,4 +1,6 @@
SET GLOBAL innodb_defragment_stats_accuracy = 20;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
DELETE FROM mysql.innodb_index_stats;
# Create table.
CREATE TABLE t1 (a INT PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256),
@@ -77,6 +79,8 @@ t1 SECOND n_leaf_pages_reserved
t1 SECOND n_page_split
t1 SECOND n_pages_freed
set global innodb_defragment_stats_accuracy = 40;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
INSERT INTO t1 (b) SELECT b from t1;
SELECT table_name, index_name, stat_name FROM mysql.innodb_index_stats;
table_name index_name stat_name
@@ -116,6 +120,8 @@ drop index SECOND on t2;
# MDEV-26636: Statistics must not be written for temporary tables
#
SET GLOBAL innodb_defragment_stats_accuracy = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TEMPORARY TABLE t (a INT PRIMARY KEY, c CHAR(255) NOT NULL)
ENGINE=InnoDB;
INSERT INTO t SELECT seq, '' FROM seq_1_to_100;
diff --git a/mysql-test/suite/innodb/r/innodb_defrag_stats_many_tables.result b/mysql-test/suite/innodb/r/innodb_defrag_stats_many_tables.result
index e668c38e059..37049b6386e 100644
--- a/mysql-test/suite/innodb/r/innodb_defrag_stats_many_tables.result
+++ b/mysql-test/suite/innodb/r/innodb_defrag_stats_many_tables.result
@@ -5,6 +5,8 @@ SET @start_flush_log_at_trx_commit = @@global.innodb_flush_log_at_trx_commit;
SET @@global.innodb_flush_log_at_trx_commit=2;
SET @start_innodb_defragment_stats_accuracy = @@global.innodb_defragment_stats_accuracy;
SET @@global.innodb_defragment_stats_accuracy = 80;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), KEY SECOND(a, b)) ENGINE=INNODB;
INSERT INTO t1 VALUES(1, REPEAT('A', 256));
INSERT INTO t1 (b) SELECT b from t1;
@@ -34,5 +36,7 @@ sleep(15)
select stat_value > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name = 'n_page_split';
stat_value > 0
SET @@global.innodb_defragment_stats_accuracy = @start_innodb_defragment_stats_accuracy;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SET @@global.table_definition_cache = @start_table_definition_cache;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb_defragment.result b/mysql-test/suite/innodb/r/innodb_defragment.result
index 533a39eec9d..72471060668 100644
--- a/mysql-test/suite/innodb/r/innodb_defragment.result
+++ b/mysql-test/suite/innodb/r/innodb_defragment.result
@@ -1,4 +1,6 @@
set global innodb_defragment_stats_accuracy = 80;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(256), KEY SECOND(a, b)) ENGINE=INNODB;
optimize table t1;
Table Op Msg_type Msg_text
@@ -66,6 +68,8 @@ select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like
count(stat_value) = 0
1
SET @@global.innodb_defragment_n_pages = 3;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
@@ -104,6 +108,8 @@ select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like
count(stat_value) = 0
1
SET @@global.innodb_defragment_n_pages = 10;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
diff --git a/mysql-test/suite/innodb/r/innodb_defragment_small.result b/mysql-test/suite/innodb/r/innodb_defragment_small.result
index fcb3bf2e07f..6b80637f3f8 100644
--- a/mysql-test/suite/innodb/r/innodb_defragment_small.result
+++ b/mysql-test/suite/innodb/r/innodb_defragment_small.result
@@ -1,6 +1,8 @@
SET @innodb_defragment_orig=@@GLOBAL.innodb_defragment;
SET @innodb_optimize_fulltext_orig=@@GLOBAL.innodb_optimize_fulltext_only;
SET GLOBAL innodb_defragment = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SET GLOBAL innodb_optimize_fulltext_only = 0;
#
# MDEV-12198 innodb_defragment=1 crashes server on
@@ -27,6 +29,8 @@ OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
SET GLOBAL innodb_defragment = 0;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
@@ -37,4 +41,6 @@ Table Op Msg_type Msg_text
test.t1 optimize status OK
DROP TABLE t1;
SET GLOBAL innodb_defragment = @innodb_defragment_orig;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SET GLOBAL innodb_optimize_fulltext_only = @innodb_optimize_fulltext_orig;
diff --git a/mysql-test/suite/innodb/r/innodb_mysql.result b/mysql-test/suite/innodb/r/innodb_mysql.result
index aa8cc118ce6..7d5dc38f52c 100644
--- a/mysql-test/suite/innodb/r/innodb_mysql.result
+++ b/mysql-test/suite/innodb/r/innodb_mysql.result
@@ -191,8 +191,8 @@ min(7)
7
explain select min(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1i ALL NULL NULL NULL NULL 0
-1 SIMPLE t2i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
select min(7) from t2i join t1i;
min(7)
NULL
@@ -207,8 +207,8 @@ max(7)
7
explain select max(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1i ALL NULL NULL NULL NULL 0
-1 SIMPLE t2i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
select max(7) from t2i join t1i;
max(7)
NULL
@@ -239,7 +239,7 @@ select 1, max(1) from t1i where 1=99;
explain select count(*), min(7), max(7) from t1m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1m system NULL NULL NULL NULL 0 Const row not found
-1 SIMPLE t1i ALL NULL NULL NULL NULL 0
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t1m, t1i;
count(*) min(7) max(7)
0 NULL NULL
@@ -253,7 +253,7 @@ count(*) min(7) max(7)
explain select count(*), min(7), max(7) from t2m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2m system NULL NULL NULL NULL 1
-1 SIMPLE t1i ALL NULL NULL NULL NULL 0
+1 SIMPLE t1i ALL NULL NULL NULL NULL 1
select count(*), min(7), max(7) from t2m, t1i;
count(*) min(7) max(7)
0 NULL NULL
@@ -346,10 +346,10 @@ insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d");
alter table t1 drop primary key, add primary key (f2, f1);
explain select distinct f1 a, f1 b from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index; Using temporary
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using temporary
explain select distinct f1, f2 from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
drop table t1;
CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
INDEX (name));
@@ -396,7 +396,7 @@ test.t1 analyze status OK
flush tables;
EXPLAIN SELECT DISTINCT t1.name, t1.dept FROM t1 WHERE t1.name='rs5';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref name name 22 const 2 Using where; Using index
+1 SIMPLE t1 range name name 44 NULL 2 Using where; Using index for group-by
SELECT DISTINCT t1.name, t1.dept FROM t1 WHERE t1.name='rs5';
name dept
rs5 cs10
@@ -405,7 +405,7 @@ DELETE FROM t1;
# Masking (#) number in "rows" column of the following EXPLAIN output, as it may vary (bug#47746).
EXPLAIN SELECT DISTINCT t1.name, t1.dept FROM t1 WHERE t1.name='rs5';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref name name 22 const # Using where; Using index
+1 SIMPLE t1 range name name 44 NULL # Using where; Using index for group-by
SELECT DISTINCT t1.name, t1.dept FROM t1 WHERE t1.name='rs5';
name dept
DROP TABLE t1;
@@ -1193,7 +1193,7 @@ INSERT INTO t1 SELECT a + 32, MOD(a + 32, 20), 1 FROM t1;
INSERT INTO t1 SELECT a + 64, MOD(a + 64, 20), 1 FROM t1;
EXPLAIN SELECT b, SUM(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL b 5 NULL 128
+1 SIMPLE t1 ALL NULL NULL NULL NULL 128 Using temporary; Using filesort
EXPLAIN SELECT SQL_BIG_RESULT b, SUM(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 128 Using filesort
@@ -1312,13 +1312,13 @@ EXPLAIN SELECT * FROM t1 WHERE b BETWEEN 1 AND 2 ORDER BY a;
id 1
select_type SIMPLE
table t1
-type range
+type index
possible_keys bkey
-key bkey
-key_len 5
+key PRIMARY
+key_len 4
ref NULL
rows 32
-Extra Using where; Using index; Using filesort
+Extra Using where
SELECT * FROM t1 WHERE b BETWEEN 1 AND 2 ORDER BY a;
a b
1 2
@@ -1407,13 +1407,13 @@ EXPLAIN SELECT * FROM t2 WHERE b=1 ORDER BY a;
id 1
select_type SIMPLE
table t2
-type ref
+type index
possible_keys bkey
-key bkey
-key_len 5
-ref const
+key PRIMARY
+key_len 4
+ref NULL
rows 16
-Extra Using where; Using index; Using filesort
+Extra Using where
SELECT * FROM t2 WHERE b=1 ORDER BY a;
a b c
1 1 1
@@ -1629,7 +1629,7 @@ c b d
3 2 40
EXPLAIN SELECT c,b FROM t1 GROUP BY c,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL c 8 NULL 3 Using index
+1 SIMPLE t1 index NULL c 8 NULL 3
SELECT c,b FROM t1 GROUP BY c,b;
c b
1 1
@@ -1787,7 +1787,7 @@ INSERT INTO t1 VALUES
(191, 'member', 1), (NULL, 'member', 3), (NULL, 'member', 4), (201, 'member', 2);
EXPLAIN SELECT * FROM t1 WHERE id=191 OR id IS NULL ORDER BY d;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL idx NULL NULL NULL 4 Using where; Using filesort
+1 SIMPLE t1 ref_or_null idx idx 5 const 3 Using index condition; Using filesort
SELECT * FROM t1 WHERE id=191 OR id IS NULL ORDER BY d;
id type d
191 member 1
@@ -1797,13 +1797,16 @@ DROP TABLE t1;
set @my_innodb_autoextend_increment=@@global.innodb_autoextend_increment;
set global innodb_autoextend_increment=8;
set global innodb_autoextend_increment=@my_innodb_autoextend_increment;
+#
+# Bug #37830: ORDER BY ASC/DESC - no difference
+#
CREATE TABLE t1 (a int, b int, c int, PRIMARY KEY (a), KEY t1_b (b))
ENGINE=InnoDB;
-INSERT INTO t1 (a,b,c) VALUES (1,1,1), (2,1,1), (3,1,1), (4,1,1);
+INSERT INTO t1 (a,b,c) VALUES (1,1,1), (2,1,1), (3,1,1), (4,1,1), (100,2,2);
INSERT INTO t1 (a,b,c) SELECT a+4,b,c FROM t1;
EXPLAIN SELECT a, b, c FROM t1 WHERE b = 1 ORDER BY a DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range t1_b t1_b 5 NULL 8 Using where
+1 SIMPLE t1 ref t1_b t1_b 5 const 8 Using where
SELECT a, b, c FROM t1 WHERE b = 1 ORDER BY a DESC LIMIT 5;
a b c
8 1 1
@@ -2342,24 +2345,24 @@ EXPLAIN SELECT c FROM bar WHERE b>2;;
id 1
select_type SIMPLE
table bar
-type ALL
+type range
possible_keys b
-key NULL
-key_len NULL
+key b
+key_len 5
ref NULL
-rows 6
-Extra Using where
+rows 5
+Extra Using index condition
EXPLAIN SELECT c FROM foo WHERE b>2;;
id 1
select_type SIMPLE
table foo
-type ALL
+type range
possible_keys b
-key NULL
-key_len NULL
+key b
+key_len 5
ref NULL
-rows 6
-Extra Using where
+rows 5
+Extra Using index condition
EXPLAIN SELECT c FROM foo2 WHERE b>2;;
id 1
select_type SIMPLE
@@ -2970,7 +2973,7 @@ NULL 75
EXPLAIN SELECT t1.id,t2.id FROM t2 LEFT JOIN t1 ON t1.id>=74 AND t1.id<=0
WHERE t2.id=75 AND t1.id IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 const PRIMARY NULL NULL NULL 1 Impossible ON condition
+1 SIMPLE t1 const PRIMARY NULL NULL NULL 0 Impossible ON condition
1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using where
DROP TABLE t1,t2;
#
@@ -3066,7 +3069,7 @@ f1 f2 f3 f4
EXPLAIN SELECT * FROM t1 WHERE f2 = 1 AND f4 = TRUE
ORDER BY f1 DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range f2,f4 f4 1 NULL 22 Using where
+1 SIMPLE t1 index f2,f4 PRIMARY 4 NULL 5 Using where
DROP TABLE t1;
#
# Bug#54117 crash in thr_multi_unlock, temporary table
@@ -3114,8 +3117,8 @@ select_type SIMPLE
table t1
type index
possible_keys NULL
-key PRIMARY
-key_len 8
+key b
+key_len 13
ref NULL
rows 3
Extra Using index
@@ -3127,8 +3130,8 @@ select_type SIMPLE
table t1
type index
possible_keys NULL
-key PRIMARY
-key_len 8
+key b
+key_len 18
ref NULL
rows 3
Extra Using index
@@ -3291,7 +3294,7 @@ SELECT t2.b FROM t1,t2 WHERE t1.a IN (SELECT 1 FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 1
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t2 index NULL PRIMARY 4 NULL 1 Using index; FirstMatch(t1); Using join buffer (incremental, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1 FirstMatch(t1); Using join buffer (incremental, BNL join)
SELECT t2.b FROM t1,t2 WHERE t1.a IN (SELECT 1 FROM t2);
b
1
diff --git a/mysql-test/suite/innodb/r/innodb_scrub.result b/mysql-test/suite/innodb/r/innodb_scrub.result
index b4a418ce2ad..f824a1d0858 100644
--- a/mysql-test/suite/innodb/r/innodb_scrub.result
+++ b/mysql-test/suite/innodb/r/innodb_scrub.result
@@ -18,9 +18,13 @@ SET @scrub= @@GLOBAL.innodb_immediate_scrub_data_uncompressed;
SET GLOBAL innodb_immediate_scrub_data_uncompressed= 1;
SET @fpt=@@GLOBAL.innodb_file_per_table;
SET GLOBAL innodb_file_per_table=0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t ENGINE=InnoDB AS SELECT 1;
DROP TABLE t;
SET GLOBAL innodb_file_per_table=@fpt;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TEMPORARY TABLE tmp ENGINE=InnoDB AS SELECT 1;
DROP TABLE tmp;
SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED= @scrub;
diff --git a/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result b/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result
index e919f0dee0b..b51a7fb3f6d 100644
--- a/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result
+++ b/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result
@@ -116,12 +116,8 @@ buffer_LRU_unzip_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL N
buffer_LRU_unzip_search_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_member Page scanned per single LRU unzip search
buffer_page_read_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Leaf Pages read
buffer_page_read_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Non-leaf Pages read
-buffer_page_read_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Leaf Pages read
-buffer_page_read_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Non-Leaf Pages read
buffer_page_read_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Undo Log Pages read
buffer_page_read_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Inode Pages read
-buffer_page_read_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Free List Pages read
-buffer_page_read_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Bitmap Pages read
buffer_page_read_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of System Pages read
buffer_page_read_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Transaction System Pages read
buffer_page_read_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of File Space Header Pages read
@@ -132,12 +128,8 @@ buffer_page_read_zblob2 buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NU
buffer_page_read_other buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of other/unknown (old version of InnoDB) Pages read
buffer_page_written_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Leaf Pages written
buffer_page_written_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Non-leaf Pages written
-buffer_page_written_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Leaf Pages written
-buffer_page_written_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Non-Leaf Pages written
buffer_page_written_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Undo Log Pages written
buffer_page_written_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Inode Pages written
-buffer_page_written_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Free List Pages written
-buffer_page_written_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Bitmap Pages written
buffer_page_written_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of System Pages written
buffer_page_written_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Transaction System Pages written
buffer_page_written_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of File Space Header Pages written
@@ -205,14 +197,6 @@ adaptive_hash_rows_removed adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL
adaptive_hash_rows_deleted_no_hash_entry adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of rows deleted that did not have corresponding Adaptive Hash Index entries
adaptive_hash_rows_updated adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Adaptive Hash Index rows updated
file_num_open_files file_system 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 value Number of files currently open (innodb_num_open_files)
-ibuf_merges_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of inserted records merged by change buffering
-ibuf_merges_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of deleted records merged by change buffering
-ibuf_merges_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of purge records merged by change buffering
-ibuf_merges_discard_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of insert merged operations discarded
-ibuf_merges_discard_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of deleted merged operations discarded
-ibuf_merges_discard_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of purge merged operations discarded
-ibuf_merges change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of change buffer merges
-ibuf_size change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Change buffer size in pages
innodb_master_thread_sleeps server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of times (seconds) master thread sleeps
innodb_activity_count server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Current server activity count
innodb_master_active_loops server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of times master thread performs its tasks when server is active
diff --git a/mysql-test/suite/innodb/r/innodb_stats.result b/mysql-test/suite/innodb/r/innodb_stats.result
index b300af767c6..cb5247f081e 100644
--- a/mysql-test/suite/innodb/r/innodb_stats.result
+++ b/mysql-test/suite/innodb/r/innodb_stats.result
@@ -151,7 +151,7 @@ INDEX_NAME a_key
SEQ_IN_INDEX 1
COLUMN_NAME a
COLLATION A
-CARDINALITY 2
+CARDINALITY 1
SUB_PART NULL
PACKED NULL
NULLABLE YES
@@ -204,7 +204,7 @@ INDEX_NAME a_key
SEQ_IN_INDEX 1
COLUMN_NAME a
COLLATION A
-CARDINALITY 3
+CARDINALITY 1
SUB_PART NULL
PACKED NULL
NULLABLE YES
@@ -257,7 +257,7 @@ INDEX_NAME a_key
SEQ_IN_INDEX 1
COLUMN_NAME a
COLLATION A
-CARDINALITY 2
+CARDINALITY 1
SUB_PART NULL
PACKED NULL
NULLABLE YES
@@ -522,7 +522,7 @@ INDEX_NAME a_key
SEQ_IN_INDEX 1
COLUMN_NAME a
COLLATION A
-CARDINALITY 10
+CARDINALITY 5
SUB_PART NULL
PACKED NULL
NULLABLE YES
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch.result b/mysql-test/suite/innodb/r/innodb_stats_fetch.result
index d7b7d78ec71..6df1831db48 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch.result
@@ -131,16 +131,16 @@ FROM information_schema.statistics WHERE table_name = 'test_ps_fetch'
ORDER BY index_name, seq_in_index;
seq_in_index 1
column_name c
-cardinality 6
+cardinality 3
seq_in_index 2
column_name d
-cardinality 22
+cardinality 11
seq_in_index 1
column_name a
-cardinality 40
+cardinality 20
seq_in_index 2
column_name b
-cardinality 200
+cardinality 90
SELECT
table_rows, avg_row_length, max_data_length, index_length
FROM information_schema.tables WHERE table_name = 'test_ps_fetch';
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result b/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result
index 1f8471304d5..b1e6bb7fa72 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result
@@ -24,7 +24,7 @@ FROM information_schema.statistics WHERE table_name = 'test_ps_fetch_corrupted'
ORDER BY index_name, seq_in_index;
seq_in_index 1
column_name a
-cardinality 0
+cardinality 1
SELECT table_rows, avg_row_length, max_data_length, index_length
FROM information_schema.tables WHERE table_name = 'test_ps_fetch_corrupted';
table_rows 0
@@ -38,7 +38,7 @@ FROM information_schema.statistics WHERE table_name = 'test_ps_fetch_corrupted'
ORDER BY index_name, seq_in_index;
seq_in_index 1
column_name a
-cardinality 0
+cardinality 1
SELECT table_rows, avg_row_length, max_data_length, index_length
FROM information_schema.tables WHERE table_name = 'test_ps_fetch_corrupted';
table_rows 0
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result b/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
index 91bb2bf3ecd..a6627417d1a 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
@@ -22,7 +22,7 @@ FROM information_schema.statistics WHERE table_name = 'test_ps_fetch_nonexistent
ORDER BY index_name, seq_in_index;
seq_in_index 1
column_name a
-cardinality 0
+cardinality 1
SELECT table_rows, avg_row_length, max_data_length, index_length
FROM information_schema.tables WHERE table_name = 'test_ps_fetch_nonexistent';
table_rows 0
diff --git a/mysql-test/suite/innodb/r/innodb_status_variables.result b/mysql-test/suite/innodb/r/innodb_status_variables.result
index 3a86271c226..a42f18ddd5b 100644
--- a/mysql-test/suite/innodb/r/innodb_status_variables.result
+++ b/mysql-test/suite/innodb/r/innodb_status_variables.result
@@ -45,16 +45,6 @@ INNODB_DBLWR_PAGES_WRITTEN
INNODB_DBLWR_WRITES
INNODB_DEADLOCKS
INNODB_HISTORY_LIST_LENGTH
-INNODB_IBUF_DISCARDED_DELETE_MARKS
-INNODB_IBUF_DISCARDED_DELETES
-INNODB_IBUF_DISCARDED_INSERTS
-INNODB_IBUF_FREE_LIST
-INNODB_IBUF_MERGED_DELETE_MARKS
-INNODB_IBUF_MERGED_DELETES
-INNODB_IBUF_MERGED_INSERTS
-INNODB_IBUF_MERGES
-INNODB_IBUF_SEGMENT_SIZE
-INNODB_IBUF_SIZE
INNODB_LOG_WAITS
INNODB_LOG_WRITE_REQUESTS
INNODB_LOG_WRITES
diff --git a/mysql-test/suite/innodb/r/insert_debug.result b/mysql-test/suite/innodb/r/insert_debug.result
index 3ac9df58c16..cafab4aa978 100644
--- a/mysql-test/suite/innodb/r/insert_debug.result
+++ b/mysql-test/suite/innodb/r/insert_debug.result
@@ -2,12 +2,10 @@
# Bug#19904003 INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=1
# CAUSES INFINITE PAGE SPLIT
#
-SET GLOBAL innodb_change_buffering_debug=1;
SET GLOBAL innodb_limit_optimistic_insert_debug=1;
CREATE TABLE t1(c1 INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY HASH (c1) PARTITIONS 15;
DROP TABLE t1;
-SET GLOBAL innodb_change_buffering_debug=0;
SET GLOBAL innodb_limit_optimistic_insert_debug=0;
#
# Bug#25082593 FOREIGN KEY VALIDATION DOESN'T NEED
diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result
index 7205fc48b50..6efe5d7c734 100644
--- a/mysql-test/suite/innodb/r/instant_alter_debug.result
+++ b/mysql-test/suite/innodb/r/instant_alter_debug.result
@@ -279,10 +279,14 @@ SET GLOBAL innodb_limit_optimistic_insert_debug = @old_limit;
ALTER TABLE t1 ADD COLUMN b INT, ALGORITHM=INSTANT;
SET @old_defragment = @@innodb_defragment;
SET GLOBAL innodb_defragment = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
SET GLOBAL innodb_defragment = @old_defragment;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
CHECK TABLE t1;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,16k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,16k.rdiff
index 57d87f7172f..320df318434 100644
--- a/mysql-test/suite/innodb/r/instant_alter_limit,16k.rdiff
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,16k.rdiff
@@ -1,5 +1,5 @@
---- instant_alter_limit.result 2020-05-26 18:01:27.377946439 +0530
-+++ instant_alter_limit,16k.reject 2020-05-26 20:14:38.452463919 +0530
+--- instant_alter_limit.result
++++ instant_alter_limit,16k.reject
@@ -45,3 +45,10 @@
instants
502
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff
index 8f8cf64b7fc..951f0ce2320 100644
--- a/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff
@@ -1,5 +1,5 @@
---- instant_alter_limit.result 2020-05-26 18:01:27.377946439 +0530
-+++ instant_alter_limit,32k.reject 2020-05-26 19:59:19.743877366 +0530
+--- instant_alter_limit.result
++++ instant_alter_limit,32k.reject
@@ -43,5 +43,12 @@
FROM information_schema.global_status
WHERE variable_name = 'innodb_instant_alter_column';
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff
index dad28218a02..0ebd590ad54 100644
--- a/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff
@@ -1,5 +1,5 @@
---- instant_alter_limit.result 2020-05-26 18:01:27.377946439 +0530
-+++ instant_alter_limit,4k.reject 2020-05-26 20:17:53.314736548 +0530
+--- instant_alter_limit.result
++++ instant_alter_limit,4k.reject
@@ -5,6 +5,276 @@
ENGINE=InnoDB;
INSERT INTO t VALUES(1,2,3,4,5);
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff
index d7479dbba40..7c58fa4a8db 100644
--- a/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff
@@ -1,5 +1,5 @@
---- instant_alter_limit.result 2020-05-26 18:01:27.377946439 +0530
-+++ instant_alter_limit,64k.reject 2020-05-26 20:00:22.499711222 +0530
+--- instant_alter_limit.result
++++ instant_alter_limit,64k.reject
@@ -43,5 +43,12 @@
FROM information_schema.global_status
WHERE variable_name = 'innodb_instant_alter_column';
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff
index 1fe3e1a56eb..d70156f3083 100644
--- a/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff
@@ -1,5 +1,5 @@
---- instant_alter_limit.result 2020-05-26 18:01:27.377946439 +0530
-+++ instant_alter_limit,8k.reject 2020-05-26 20:19:50.881869095 +0530
+--- instant_alter_limit.result
++++ instant_alter_limit,8k.reject
@@ -5,6 +5,28 @@
ENGINE=InnoDB;
INSERT INTO t VALUES(1,2,3,4,5);
diff --git a/mysql-test/suite/innodb/r/log_data_file_size.result b/mysql-test/suite/innodb/r/log_data_file_size.result
index d33752b089c..ddb8e694e40 100644
--- a/mysql-test/suite/innodb/r/log_data_file_size.result
+++ b/mysql-test/suite/innodb/r/log_data_file_size.result
@@ -1,6 +1,10 @@
SET GLOBAL innodb_file_per_table=0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t(a INT)ENGINE=InnoDB;
SET GLOBAL innodb_file_per_table=1;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE ibd4(a INT UNIQUE)ENGINE=InnoDB;
CREATE TABLE ibd4f(a INT UNIQUE)ENGINE=InnoDB;
CREATE TABLE ibd5(a INT UNIQUE, b INT UNIQUE)ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result
index 766122959ab..7ac212916de 100644
--- a/mysql-test/suite/innodb/r/log_file_name.result
+++ b/mysql-test/suite/innodb/r/log_file_name.result
@@ -1,4 +1,3 @@
-SET GLOBAL innodb_file_per_table=ON;
FLUSH TABLES;
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
# restart
diff --git a/mysql-test/suite/innodb/r/max_record_size,16k,compact,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,16k,compact,innodb.rdiff
index 118145bec2e..b9f51624729 100644
--- a/mysql-test/suite/innodb/r/max_record_size,16k,compact,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,16k,compact,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:51:26.070418078 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,65 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,16k,dynamic,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,16k,dynamic,innodb.rdiff
index 33067866b82..5effc664f2c 100644
--- a/mysql-test/suite/innodb/r/max_record_size,16k,dynamic,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,16k,dynamic,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:52:54.580956978 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,207 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,16k,innodb,redundant.rdiff b/mysql-test/suite/innodb/r/max_record_size,16k,innodb,redundant.rdiff
index e50e4d2be3a..227b64a8a48 100644
--- a/mysql-test/suite/innodb/r/max_record_size,16k,innodb,redundant.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,16k,innodb,redundant.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:55:05.258762945 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,65 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,32k,compact,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,32k,compact,innodb.rdiff
index 1bc03a5d97a..d236e12b1e1 100644
--- a/mysql-test/suite/innodb/r/max_record_size,32k,compact,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,32k,compact,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:27:08.004932026 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,104 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,32k,dynamic,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,32k,dynamic,innodb.rdiff
index 71c4c2e41b7..ce9462b6900 100644
--- a/mysql-test/suite/innodb/r/max_record_size,32k,dynamic,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,32k,dynamic,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:38:41.609328820 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,351 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,32k,innodb,redundant.rdiff b/mysql-test/suite/innodb/r/max_record_size,32k,innodb,redundant.rdiff
index e42b3de8845..53d6f76a2d2 100644
--- a/mysql-test/suite/innodb/r/max_record_size,32k,innodb,redundant.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,32k,innodb,redundant.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:41:03.319664978 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,104 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,4k,compact,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,4k,compact,innodb.rdiff
index 6f08dab3ca1..37871798f31 100644
--- a/mysql-test/suite/innodb/r/max_record_size,4k,compact,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,4k,compact,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:56:23.489432164 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -1,47 +1,37 @@
call mtr.add_suppression("Cannot add field `.*` in table `test`.`t1` because after adding it, the row size is");
CREATE TABLE t1 (
diff --git a/mysql-test/suite/innodb/r/max_record_size,4k,dynamic,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,4k,dynamic,innodb.rdiff
index bd37d4354e7..b0b6b007d97 100644
--- a/mysql-test/suite/innodb/r/max_record_size,4k,dynamic,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,4k,dynamic,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:57:38.636143710 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -1,47 +1,103 @@
call mtr.add_suppression("Cannot add field `.*` in table `test`.`t1` because after adding it, the row size is");
CREATE TABLE t1 (
diff --git a/mysql-test/suite/innodb/r/max_record_size,4k,innodb,redundant.rdiff b/mysql-test/suite/innodb/r/max_record_size,4k,innodb,redundant.rdiff
index 93c39ff6714..0db6450d9c1 100644
--- a/mysql-test/suite/innodb/r/max_record_size,4k,innodb,redundant.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,4k,innodb,redundant.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:58:58.318768169 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -1,47 +1,37 @@
call mtr.add_suppression("Cannot add field `.*` in table `test`.`t1` because after adding it, the row size is");
CREATE TABLE t1 (
diff --git a/mysql-test/suite/innodb/r/max_record_size,64k,compact,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,64k,compact,innodb.rdiff
index 5e6c62212d1..ab75975e238 100644
--- a/mysql-test/suite/innodb/r/max_record_size,64k,compact,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,64k,compact,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:45:21.760116841 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,186 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,64k,dynamic,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,64k,dynamic,innodb.rdiff
index 2cbf4cd9c54..3e13a510055 100644
--- a/mysql-test/suite/innodb/r/max_record_size,64k,dynamic,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,64k,dynamic,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:48:32.453208310 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,486 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,64k,innodb,redundant.rdiff b/mysql-test/suite/innodb/r/max_record_size,64k,innodb,redundant.rdiff
index c324969fb21..a31285a2714 100644
--- a/mysql-test/suite/innodb/r/max_record_size,64k,innodb,redundant.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,64k,innodb,redundant.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 12:50:05.663724193 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -3,45 +3,104 @@
c1 CHAR(255), c2 CHAR(255), c3 CHAR(255), c4 CHAR(255),
c5 CHAR(255), c6 CHAR(255), c7 CHAR(255), c8 CHAR(255),
diff --git a/mysql-test/suite/innodb/r/max_record_size,8k,compact,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,8k,compact,innodb.rdiff
index 4db669a8aa6..cafb5f25a0d 100644
--- a/mysql-test/suite/innodb/r/max_record_size,8k,compact,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,8k,compact,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 13:00:25.037261867 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -12,7 +12,7 @@
c9 CHAR(255), c10 CHAR(255), c11 CHAR(255), c12 CHAR(255),
c13 CHAR(255), c14 CHAR(255), c15 CHAR(255), c16 CHAR(255)
diff --git a/mysql-test/suite/innodb/r/max_record_size,8k,dynamic,innodb.rdiff b/mysql-test/suite/innodb/r/max_record_size,8k,dynamic,innodb.rdiff
index 5f79d77e7f9..2f592fbb4b3 100644
--- a/mysql-test/suite/innodb/r/max_record_size,8k,dynamic,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/max_record_size,8k,dynamic,innodb.rdiff
@@ -1,5 +1,5 @@
---- max_record_size.result 2019-07-03 11:54:44.591421526 +0300
-+++ max_record_size.reject 2019-07-03 13:01:51.215756779 +0300
+--- max_record_size.result
++++ max_record_size.reject
@@ -12,7 +12,7 @@
c9 CHAR(255), c10 CHAR(255), c11 CHAR(255), c12 CHAR(255),
c13 CHAR(255), c14 CHAR(255), c15 CHAR(255), c16 CHAR(255)
diff --git a/mysql-test/suite/innodb/r/mdev-14846.result b/mysql-test/suite/innodb/r/mdev-14846.result
index a1ccfb6bb4d..b5d8dcbec19 100644
--- a/mysql-test/suite/innodb/r/mdev-14846.result
+++ b/mysql-test/suite/innodb/r/mdev-14846.result
@@ -34,13 +34,25 @@ SET DEBUG_SYNC='now SIGNAL default_dml';
SET DEBUG_SYNC='now SIGNAL con2_dml';
connection default;
SET DEBUG_SYNC='now WAIT_FOR default_dml';
-UPDATE t3 AS alias1 LEFT JOIN t3 AS alias2 ON ( alias1.f1 <> alias1.f2 ) SET alias1.f3 = 59 WHERE ( EXISTS ( SELECT t1.f3 FROM t1 WHERE t1.f1 = alias1.f1 ) ) OR alias2.f1 = 'h';
+explain UPDATE t3 AS alias1 LEFT JOIN t3 AS alias2 ON ( alias1.f1 <> alias1.f2 ) SET alias1.f3 = 59 WHERE ( EXISTS ( SELECT t1.f3 FROM t1 IGNORE INDEX (f1) WHERE t1.f1 = alias1.f1 ) ) OR alias2.f1 = 'h';
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY alias1 ALL NULL NULL NULL NULL #
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL # Using where
+UPDATE t3 AS alias1 LEFT JOIN t3 AS alias2 ON ( alias1.f1 <> alias1.f2 ) SET alias1.f3 = 59 WHERE ( EXISTS ( SELECT t1.f3 FROM t1 IGNORE INDEX (f1) WHERE t1.f1 = alias1.f1 ) ) OR alias2.f1 = 'h';
connect con2,localhost,root,,test;
set debug_sync='now WAIT_FOR con2_dml';
SET DEBUG_SYNC='now SIGNAL con1_dml2';
disconnect con2;
connection con1;
SET DEBUG_SYNC='now WAIT_FOR con1_dml2';
+explain UPDATE v4, t1 SET t1.pk = 76 WHERE t1.f2 IN ( SELECT t2.f FROM t2 INNER JOIN t3 );
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 index NULL f1 12 NULL # Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL #
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 13 func #
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL #
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL #
UPDATE v4, t1 SET t1.pk = 76 WHERE t1.f2 IN ( SELECT t2.f FROM t2 INNER JOIN t3 );
connection default;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
diff --git a/mysql-test/suite/innodb/r/mdev-15707.result b/mysql-test/suite/innodb/r/mdev-15707.result
deleted file mode 100644
index 3967ce48b62..00000000000
--- a/mysql-test/suite/innodb/r/mdev-15707.result
+++ /dev/null
@@ -1,24 +0,0 @@
-CREATE TABLE t1(
-a INT AUTO_INCREMENT PRIMARY KEY,
-b CHAR(255),
-INDEX(b))
-ENGINE=InnoDB;
-INSERT INTO t1(b) SELECT UUID();
-BEGIN;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-INSERT INTO t1(b) SELECT UUID() FROM t1;
-COMMIT;
-UPDATE t1 SET b=UUID();
-DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/monitor.result b/mysql-test/suite/innodb/r/monitor.result
index 63f78752276..57f10731ed6 100644
--- a/mysql-test/suite/innodb/r/monitor.result
+++ b/mysql-test/suite/innodb/r/monitor.result
@@ -82,12 +82,8 @@ buffer_LRU_unzip_search_num_scan disabled
buffer_LRU_unzip_search_scanned_per_call disabled
buffer_page_read_index_leaf disabled
buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
buffer_page_read_undo_log disabled
buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
buffer_page_read_system_page disabled
buffer_page_read_trx_system disabled
buffer_page_read_fsp_hdr disabled
@@ -98,12 +94,8 @@ buffer_page_read_zblob2 disabled
buffer_page_read_other disabled
buffer_page_written_index_leaf disabled
buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
buffer_page_written_undo_log disabled
buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
buffer_page_written_system_page disabled
buffer_page_written_trx_system disabled
buffer_page_written_fsp_hdr disabled
@@ -171,14 +163,6 @@ adaptive_hash_rows_removed disabled
adaptive_hash_rows_deleted_no_hash_entry disabled
adaptive_hash_rows_updated disabled
file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
innodb_master_thread_sleeps disabled
innodb_activity_count disabled
innodb_master_active_loops disabled
diff --git a/mysql-test/suite/innodb/r/mvcc.result b/mysql-test/suite/innodb/r/mvcc.result
index 215b2165b8c..3e6b0ad35db 100644
--- a/mysql-test/suite/innodb/r/mvcc.result
+++ b/mysql-test/suite/innodb/r/mvcc.result
@@ -1,5 +1,3 @@
-SET @save_per_table= @@GLOBAL.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table= 1;
#
# MDEV-15249 Crash in MVCC read after IMPORT TABLESPACE
#
@@ -43,4 +41,3 @@ $$
INSERT INTO t1 SET id=1,c294=1;
REPLACE t1 SET id=1,c294=1;
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table= @save_per_table;
diff --git a/mysql-test/suite/innodb/r/partition_locking.result b/mysql-test/suite/innodb/r/partition_locking.result
index f25b8a15a24..b5e96f965b3 100644
--- a/mysql-test/suite/innodb/r/partition_locking.result
+++ b/mysql-test/suite/innodb/r/partition_locking.result
@@ -148,7 +148,7 @@ a b c d e
03 03 343 7 03_03_343
03 06 343 8 03_06_343
03 07 343 9 03_07_343
-SELECT a,count(b) FROM t1 GROUP BY a ORDER BY a LOCK IN SHARE MODE SKIP LOCKED;
+SELECT a,count(b) FROM t1 force index (a) GROUP BY a ORDER BY a LOCK IN SHARE MODE SKIP LOCKED;
a count(b)
01 5
03 3
diff --git a/mysql-test/suite/innodb/r/restart,16k,innodb.rdiff b/mysql-test/suite/innodb/r/restart,16k,innodb.rdiff
index b36ed067913..3149b9aeab0 100644
--- a/mysql-test/suite/innodb/r/restart,16k,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/restart,16k,innodb.rdiff
@@ -1,5 +1,5 @@
---- ./suite/innodb/r/restart.result 2022-01-18 20:36:56.054653376 +1100
-+++ suite/innodb/r/restart.reject 2022-01-19 08:12:28.602794678 +1100
+--- ./suite/innodb/r/restart.result
++++ suite/innodb/r/restart.reject
@@ -32,10 +32,10 @@
SELECT @@innodb_buffer_pool_size INTO @innodb_buffer_pool_size_orig;
SELECT CEILING((256 + 64) * @@innodb_page_size / 1048576) * 1048576 INTO @min_pool_size;
diff --git a/mysql-test/suite/innodb/r/restart,32k,innodb.rdiff b/mysql-test/suite/innodb/r/restart,32k,innodb.rdiff
index 8fa057814c4..3f00646cb37 100644
--- a/mysql-test/suite/innodb/r/restart,32k,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/restart,32k,innodb.rdiff
@@ -1,5 +1,5 @@
---- ./suite/innodb/r/restart.result 2022-01-18 20:36:56.054653376 +1100
-+++ suite/innodb/r/restart.reject 2022-01-19 08:07:57.402230887 +1100
+--- ./suite/innodb/r/restart.result
++++ suite/innodb/r/restart.reject
@@ -32,10 +32,10 @@
SELECT @@innodb_buffer_pool_size INTO @innodb_buffer_pool_size_orig;
SELECT CEILING((256 + 64) * @@innodb_page_size / 1048576) * 1048576 INTO @min_pool_size;
diff --git a/mysql-test/suite/innodb/r/restart,4k,innodb.rdiff b/mysql-test/suite/innodb/r/restart,4k,innodb.rdiff
index 7d0846360e0..b00c56ef81f 100644
--- a/mysql-test/suite/innodb/r/restart,4k,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/restart,4k,innodb.rdiff
@@ -1,5 +1,5 @@
---- ./suite/innodb/r/restart.result 2022-01-18 20:36:56.054653376 +1100
-+++ suite/innodb/r/restart.reject 2022-01-19 08:13:56.397475513 +1100
+--- ./suite/innodb/r/restart.result
++++ suite/innodb/r/restart.reject
@@ -32,10 +32,10 @@
SELECT @@innodb_buffer_pool_size INTO @innodb_buffer_pool_size_orig;
SELECT CEILING((256 + 64) * @@innodb_page_size / 1048576) * 1048576 INTO @min_pool_size;
diff --git a/mysql-test/suite/innodb/r/restart,64k,innodb.rdiff b/mysql-test/suite/innodb/r/restart,64k,innodb.rdiff
index 3ac9f45b196..886cbcde7d9 100644
--- a/mysql-test/suite/innodb/r/restart,64k,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/restart,64k,innodb.rdiff
@@ -1,5 +1,5 @@
---- ./suite/innodb/r/restart.result 2022-01-18 20:36:56.054653376 +1100
-+++ suite/innodb/r/restart.reject 2022-01-19 08:11:32.418759095 +1100
+--- ./suite/innodb/r/restart.result
++++ suite/innodb/r/restart.reject
@@ -32,10 +32,10 @@
SELECT @@innodb_buffer_pool_size INTO @innodb_buffer_pool_size_orig;
SELECT CEILING((256 + 64) * @@innodb_page_size / 1048576) * 1048576 INTO @min_pool_size;
diff --git a/mysql-test/suite/innodb/r/restart,8k,innodb.rdiff b/mysql-test/suite/innodb/r/restart,8k,innodb.rdiff
index 4da55ebfcef..40a9e1bad1c 100644
--- a/mysql-test/suite/innodb/r/restart,8k,innodb.rdiff
+++ b/mysql-test/suite/innodb/r/restart,8k,innodb.rdiff
@@ -1,5 +1,5 @@
---- ./suite/innodb/r/restart.result 2022-01-18 20:36:56.054653376 +1100
-+++ suite/innodb/r/restart.reject 2022-01-19 08:13:11.027788852 +1100
+--- ./suite/innodb/r/restart.result
++++ suite/innodb/r/restart.reject
@@ -32,10 +32,10 @@
SELECT @@innodb_buffer_pool_size INTO @innodb_buffer_pool_size_orig;
SELECT CEILING((256 + 64) * @@innodb_page_size / 1048576) * 1048576 INTO @min_pool_size;
diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result
index b798832e96f..f0286381809 100644
--- a/mysql-test/suite/innodb/r/row_format_redundant.result
+++ b/mysql-test/suite/innodb/r/row_format_redundant.result
@@ -1,11 +1,9 @@
SET GLOBAL innodb_fast_shutdown=0;
# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
-SET GLOBAL innodb_file_per_table=1;
#
# Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE ==
# MTR_LOG_NO_REDO
#
-SET GLOBAL innodb_file_per_table=ON;
create table t1 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156)) engine=InnoDB
row_format=redundant;
@@ -16,6 +14,8 @@ insert into t values(789, 'abcdef', 'jghikl', 'mnop');
insert into t values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
insert into t1 select a,d,b,c from t, seq_1_to_1024;
SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
create table t2 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156), fulltext ftsic(c)) engine=InnoDB
row_format=redundant;
@@ -25,7 +25,6 @@ c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB
row_format=redundant;
insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa');
insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa');
-SET GLOBAL innodb_fast_shutdown=0;
# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0 --innodb-read-only
SELECT COUNT(*) FROM t1;
COUNT(*)
@@ -68,7 +67,7 @@ DROP TABLE t1;
Warnings:
Warning 1932 Table 'test.t1' doesn't exist in engine
DROP TABLE t2,t3;
-FOUND 6 /\[ERROR\] InnoDB: Table test/t1 in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=511\b/ in mysqld.1.err
+FOUND 1 /\[ERROR\] InnoDB: Table test/t1 in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=511\b.*/ in mysqld.1.err
# restart
ib_buffer_pool
ib_logfile0
diff --git a/mysql-test/suite/innodb/r/table_flags.result b/mysql-test/suite/innodb/r/table_flags.result
index 779990351c6..e421126d2eb 100644
--- a/mysql-test/suite/innodb/r/table_flags.result
+++ b/mysql-test/suite/innodb/r/table_flags.result
@@ -1,5 +1,4 @@
# restart: with restart_parameters
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE tr(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
CREATE TABLE tc(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPACT;
CREATE TABLE td(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
diff --git a/mysql-test/suite/innodb/r/table_index_statistics.result b/mysql-test/suite/innodb/r/table_index_statistics.result
index 286c5f9325f..ddb23afd346 100644
--- a/mysql-test/suite/innodb/r/table_index_statistics.result
+++ b/mysql-test/suite/innodb/r/table_index_statistics.result
@@ -14,13 +14,11 @@ ROWS_READ
10
SELECT ROWS_READ FROM INFORMATION_SCHEMA.INDEX_STATISTICS WHERE TABLE_NAME='t1';
ROWS_READ
-10
FLUSH TABLE_STATISTICS;
SELECT ROWS_READ FROM INFORMATION_SCHEMA.TABLE_STATISTICS WHERE TABLE_NAME='t1';
ROWS_READ
SELECT ROWS_READ FROM INFORMATION_SCHEMA.INDEX_STATISTICS WHERE TABLE_NAME='t1';
ROWS_READ
-10
FLUSH INDEX_STATISTICS;
SELECT ROWS_READ FROM INFORMATION_SCHEMA.INDEX_STATISTICS WHERE TABLE_NAME='t1';
ROWS_READ
@@ -32,7 +30,6 @@ ROWS_READ
10
SELECT ROWS_READ FROM INFORMATION_SCHEMA.INDEX_STATISTICS WHERE TABLE_NAME='t1';
ROWS_READ
-10
DROP TABLE t1;
CREATE TABLE t2 (c1 INT UNSIGNED);
ALTER TABLE t2 MODIFY c1 FLOAT;
diff --git a/mysql-test/suite/innodb/t/alter_kill.test b/mysql-test/suite/innodb/t/alter_kill.test
index 277d9b4e71b..fdff21e1ec0 100644
--- a/mysql-test/suite/innodb/t/alter_kill.test
+++ b/mysql-test/suite/innodb/t/alter_kill.test
@@ -25,7 +25,6 @@ call mtr.add_suppression("Table .*bug16720368.* is corrupted");
-- echo # Bug#16720368 INNODB CRASHES ON BROKEN #SQL*.IBD FILE AT STARTUP
-- echo #
-SET GLOBAL innodb_file_per_table=1;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
CREATE TABLE bug16720368_1 (a INT PRIMARY KEY) ENGINE=InnoDB;
@@ -129,8 +128,6 @@ DROP TABLE bug16720368, bug16720368_1;
-- echo # DICT_TABLE_ADD_TO_CACHE
-- echo #
-SET GLOBAL innodb_file_per_table=1;
-
CREATE TEMPORARY TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
BEGIN;
INSERT INTO t1 VALUES(42);
diff --git a/mysql-test/suite/innodb/t/alter_missing_tablespace.test b/mysql-test/suite/innodb/t/alter_missing_tablespace.test
index 9742adc30a6..ff93ec5f9c6 100644
--- a/mysql-test/suite/innodb/t/alter_missing_tablespace.test
+++ b/mysql-test/suite/innodb/t/alter_missing_tablespace.test
@@ -21,7 +21,6 @@ call mtr.add_suppression("InnoDB: ALTER TABLE `test`.`t` DISCARD TABLESPACE fail
--enable_query_log
let $MYSQLD_DATADIR=`select @@datadir`;
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t(a SERIAL)ENGINE=InnoDB;
CREATE TABLE `x..d` (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t1(a SERIAL)ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/t/encryption_threads_shutdown.test b/mysql-test/suite/innodb/t/encryption_threads_shutdown.test
index 5a63df5db86..d6e58a19ad9 100644
--- a/mysql-test/suite/innodb/t/encryption_threads_shutdown.test
+++ b/mysql-test/suite/innodb/t/encryption_threads_shutdown.test
@@ -10,10 +10,20 @@ call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE faile
--let $MYSQLD_DATADIR=`SELECT @@datadir`
--let $DATA= $MYSQLD_DATADIR/ibdata1
--let $DATACOPY=$MYSQLTEST_VARDIR/tmp/ibdata1
+--let $UNDO_1= $MYSQLD_DATADIR/undo001
+--let $UNDO_2= $MYSQLD_DATADIR/undo002
+--let $UNDO_3= $MYSQLD_DATADIR/undo003
+--let $UNDO_1_COPY=$MYSQLTEST_VARDIR/tmp/undo001
+--let $UNDO_2_COPY=$MYSQLTEST_VARDIR/tmp/undo002
+--let $UNDO_3_COPY=$MYSQLTEST_VARDIR/tmp/undo003
+
CREATE TABLE t(a INT) ENGINE=InnoDB;
--source include/kill_mysqld.inc
# Move the file to cause srv_init_abort_low() call from srv_start()
--move_file $DATA $DATACOPY
+--move_file $UNDO_1 $UNDO_1_COPY
+--move_file $UNDO_2 $UNDO_2_COPY
+--move_file $UNDO_3 $UNDO_3_COPY
# If the bug is not fixed, the server will hang here. Note that the test is
# unstable because the condition
@@ -28,5 +38,8 @@ CREATE TABLE t(a INT) ENGINE=InnoDB;
SELECT * FROM t;
--source include/kill_mysqld.inc
--move_file $DATACOPY $DATA
+--move_file $UNDO_1_COPY $UNDO_1
+--move_file $UNDO_2_COPY $UNDO_2
+--move_file $UNDO_3_COPY $UNDO_3
--source include/start_mysqld.inc
DROP TABLE t;
diff --git a/mysql-test/suite/innodb/t/gap_locks.test b/mysql-test/suite/innodb/t/gap_locks.test
index 77ce2c842b1..575a966ec42 100644
--- a/mysql-test/suite/innodb/t/gap_locks.test
+++ b/mysql-test/suite/innodb/t/gap_locks.test
@@ -2,7 +2,7 @@
CREATE TABLE t1(a INT PRIMARY KEY, b VARCHAR(40), c INT, INDEX(b,c))
ENGINE=InnoDB;
-INSERT INTO t1 VALUES (1,'1',1),(2,'2',1);
+INSERT INTO t1 VALUES (1,'1',1),(2,'2',1),(3,'3',1);
SET @save_locks= @@GLOBAL.innodb_status_output_locks;
SET GLOBAL INNODB_STATUS_OUTPUT_LOCKS = 'ON';
diff --git a/mysql-test/suite/innodb/t/ibuf_delete.test b/mysql-test/suite/innodb/t/ibuf_delete.test
deleted file mode 100644
index 82b740b6aa1..00000000000
--- a/mysql-test/suite/innodb/t/ibuf_delete.test
+++ /dev/null
@@ -1,67 +0,0 @@
---source include/have_innodb.inc
---source include/have_sequence.inc
-
-SET @buffering= @@innodb_change_buffering;
-SET GLOBAL innodb_change_buffering= deletes;
-SET @flush= @@innodb_flush_log_at_trx_commit;
-SET GLOBAL innodb_flush_log_at_trx_commit= 0;
-
-CREATE TABLE t1 (
- a varchar(1024),
- b varchar(1024),
- c varchar(1024),
- d varchar(1024),
- e varchar(1024),
- f varchar(1024),
- g varchar(1024),
- h varchar(1024),
- key (a),
- key (b),
- key (c),
- key (d)
-) ENGINE=InnoDB;
-
-INSERT INTO t1
-SELECT REPEAT('x',10), REPEAT('x',13), REPEAT('x',427), REPEAT('x',244),
-REPEAT('x',9), REPEAT('x',112), REPEAT('x',814), REPEAT('x',633)
-FROM seq_1_to_1024;
-
-CREATE TEMPORARY TABLE t2 (
- a varchar(1024),
- b varchar(1024),
- c varchar(1024),
- d varchar(1024),
- e varchar(1024),
- f varchar(1024),
- g varchar(1024),
- h varchar(1024),
- i varchar(1024),
- j varchar(1024),
- k varchar(1024),
- l varchar(1024),
- m varchar(1024),
- key (a),
- key (b),
- key (c),
- key (d),
- key (e),
- key (f)
-) ENGINE=InnoDB;
-
-SET @x=REPEAT('x',512);
-INSERT INTO t2 SELECT @x, @x, @x, @x, @x, @x, @x, @x, @x, @x, @x, @x, @x
-FROM seq_1_to_768;
-
---disable_query_log
---let $run=1024
-while ($run)
-{
- eval DELETE FROM t1 LIMIT 1 /* $run */;
- --dec $run
-}
---enable_query_log
-
-# Cleanup
-DROP TABLE t1, t2;
-SET GLOBAL innodb_change_buffering= @buffering;
-SET GLOBAL innodb_flush_log_at_trx_commit= @flush;
diff --git a/mysql-test/suite/innodb/t/ibuf_not_empty.combinations b/mysql-test/suite/innodb/t/ibuf_not_empty.combinations
deleted file mode 100644
index c4b45dcca32..00000000000
--- a/mysql-test/suite/innodb/t/ibuf_not_empty.combinations
+++ /dev/null
@@ -1,9 +0,0 @@
-[strict_crc32]
---innodb-checksum-algorithm=strict_crc32
---innodb-page-size=4k
---innodb-force-recovery=2
-
-[strict_full_crc32]
---innodb-checksum-algorithm=strict_full_crc32
---innodb-page-size=4k
---innodb-force-recovery=2
diff --git a/mysql-test/suite/innodb/t/ibuf_not_empty.test b/mysql-test/suite/innodb/t/ibuf_not_empty.test
deleted file mode 100644
index 9362f8daffa..00000000000
--- a/mysql-test/suite/innodb/t/ibuf_not_empty.test
+++ /dev/null
@@ -1,117 +0,0 @@
---source include/have_innodb.inc
---source include/no_valgrind_without_big.inc
-# innodb_change_buffering_debug option is debug only
---source include/have_debug.inc
-# Embedded server tests do not support restarting
---source include/not_embedded.inc
---source include/have_sequence.inc
-
---disable_query_log
-call mtr.add_suppression("InnoDB: Failed to find tablespace for table `test`\\.`t1` in the cache\\. Attempting to load the tablespace with space id");
-call mtr.add_suppression("InnoDB: Allocated tablespace ID \\d+ for test.t1, old maximum was");
-call mtr.add_suppression("InnoDB: Failed to find tablespace for table `mysql`\\.`transaction_registry` in the cache\\. Attempting to load the tablespace with space id");
-call mtr.add_suppression("InnoDB: Allocated tablespace ID \\d+ for mysql.transaction_registry, old maximum was");
-call mtr.add_suppression("InnoDB: Trying to read 4096 bytes");
-call mtr.add_suppression("InnoDB: File './test/t1.ibd' is corrupted");
---enable_query_log
-
-CREATE TABLE t1(
- a INT AUTO_INCREMENT PRIMARY KEY,
- b CHAR(1),
- c INT,
- INDEX(b))
-ENGINE=InnoDB STATS_PERSISTENT=0;
-
-# The flag innodb_change_buffering_debug is only available in debug builds.
-# It instructs InnoDB to try to evict pages from the buffer pool when
-# change buffering is possible, so that the change buffer will be used
-# whenever possible.
-SET GLOBAL innodb_change_buffering_debug = 1;
-SET GLOBAL innodb_change_buffering=all;
-
-# Create enough rows for the table, so that the change buffer will be
-# used for modifying the secondary index page. There must be multiple
-# index pages, because changes to the root page are never buffered.
-INSERT INTO t1 SELECT 0,'x',1 FROM seq_1_to_1024;
-let MYSQLD_DATADIR=`select @@datadir`;
-let PAGE_SIZE=`select @@innodb_page_size`;
-
---source include/shutdown_mysqld.inc
-
-# Corrupt the change buffer bitmap, to claim that pages are clean
-perl;
-do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
-my $file = "$ENV{MYSQLD_DATADIR}/test/t1.ibd";
-open(FILE, "+<$file") || die "Unable to open $file";
-binmode FILE;
-my $ps= $ENV{PAGE_SIZE};
-my $page;
-die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
-my $full_crc32 = unpack("N",substr($page,54,4)) & 0x10; # FIL_SPACE_FLAGS
-die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
-# Clean the change buffer bitmap.
-substr($page,38,$ps - 38 - 8) = chr(0) x ($ps - 38 - 8);
-my $polynomial = 0x82f63b78; # CRC-32C
-if ($full_crc32)
-{
- my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
- substr($page, $ps-4, 4) = pack("N", $ck);
-}
-else
-{
- my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
- mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
- substr($page,0,4)=$ck;
- substr($page,$ps-8,4)=$ck;
-}
-sysseek(FILE, $ps, 0) || die "Unable to rewind $file\n";
-syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
-close(FILE) || die "Unable to close $file";
-EOF
-
---let $restart_parameters= --innodb-force-recovery=6 --innodb-change-buffer-dump
---source include/start_mysqld.inc
-
---replace_regex /contains \d+ entries/contains 990 entries/
-check table t1;
-
---source include/shutdown_mysqld.inc
-
-# Truncate the file to 5 pages, as if it were empty
-perl;
-do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
-my $file = "$ENV{MYSQLD_DATADIR}/test/t1.ibd";
-open(FILE, "+<$file") || die "Unable to open $file";
-binmode FILE;
-my $ps= $ENV{PAGE_SIZE};
-my $pages=5;
-my $page;
-die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
-my $full_crc32 = unpack("N",substr($page,54,4)) & 0x10; # FIL_SPACE_FLAGS
-substr($page,46,4)=pack("N", $pages);
-my $polynomial = 0x82f63b78; # CRC-32C
-if ($full_crc32)
-{
- my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
- substr($page, $ps-4, 4) = pack("N", $ck);
-}
-else
-{
- my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
- mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
- substr($page,0,4)=$ck;
- substr($page,$ps-8,4)=$ck;
-}
-sysseek(FILE, 0, 0) || die "Unable to rewind $file\n";
-syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
-truncate(FILE, $ps * $pages);
-close(FILE) || die "Unable to close $file";
-EOF
-
---let $restart_parameters=--innodb-force_recovery=0
---source include/start_mysqld.inc
-SET GLOBAL innodb_fast_shutdown=0;
---source include/restart_mysqld.inc
-
-# Cleanup
-DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/index_tree_operation.test b/mysql-test/suite/innodb/t/index_tree_operation.test
index b9695db9346..e7958b2955d 100644
--- a/mysql-test/suite/innodb/t/index_tree_operation.test
+++ b/mysql-test/suite/innodb/t/index_tree_operation.test
@@ -9,12 +9,6 @@
# Otherwise, the following records 999,998,997 cause each page per record.
#
---disable_query_log
-SET @old_innodb_file_per_table = @@innodb_file_per_table;
---enable_query_log
-
-SET GLOBAL innodb_file_per_table=ON;
-
CREATE TABLE t1 (a BIGINT PRIMARY KEY, b VARCHAR(4096)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (0, REPEAT('a', 4096));
INSERT INTO t1 VALUES (1000, REPEAT('a', 4096));
@@ -68,7 +62,3 @@ WHERE s1.space = s2.space AND name = 'test/t1'
AND page_type = "INDEX" ORDER BY page_number;
DROP TABLE t1;
-
---disable_query_log
-SET GLOBAL innodb_file_per_table = @old_innodb_file_per_table;
---enable_query_log
diff --git a/mysql-test/suite/innodb/t/innodb-bug-14068765.test b/mysql-test/suite/innodb/t/innodb-bug-14068765.test
index c2446e0fecf..4435ac68021 100644
--- a/mysql-test/suite/innodb/t/innodb-bug-14068765.test
+++ b/mysql-test/suite/innodb/t/innodb-bug-14068765.test
@@ -2,7 +2,7 @@
let MYSQLD_DATADIR =`SELECT @@datadir`;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
CREATE DATABASE testdb_wl5522;
CREATE TABLE testdb_wl5522.t1(col1 bit(1) , col2 boolean,col3 tinyint , col4 smallint , col5 mediumint ,col6 int , col7 bigint , col8 float (14,3) ,col9 double (14,3), col10 VARCHAR(20) CHARACTER SET utf8 , col11 TEXT CHARACTER SET binary , col12 ENUM('a','b','c') CHARACTER SET binary ,col13 TEXT CHARACTER SET latin1 COLLATE latin1_general_cs ,col14 CHAR(20) , col15 VARBINARY (400) , col16 BINARY(40), col17 BLOB (400) , col18 int not null primary key,col19 DATE ,col20 DATETIME , col21 TIMESTAMP ,col22 TIME , col23 YEAR ) ENGINE = Innodb;
diff --git a/mysql-test/suite/innodb/t/innodb-bug-14084530.test b/mysql-test/suite/innodb/t/innodb-bug-14084530.test
index f27fbbe31d7..2285b86c21a 100644
--- a/mysql-test/suite/innodb/t/innodb-bug-14084530.test
+++ b/mysql-test/suite/innodb/t/innodb-bug-14084530.test
@@ -4,7 +4,7 @@ let MYSQLD_DATADIR =`SELECT @@datadir`;
SET AUTOCOMMIT = 0;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
CREATE DATABASE testdb_wl5522;
CREATE TABLE testdb_wl5522.t1 (c1 int ) engine = Innodb;
diff --git a/mysql-test/suite/innodb/t/innodb-enlarge-blob.opt b/mysql-test/suite/innodb/t/innodb-enlarge-blob.opt
index 2f9bf78f82e..1251c5f2aed 100644
--- a/mysql-test/suite/innodb/t/innodb-enlarge-blob.opt
+++ b/mysql-test/suite/innodb/t/innodb-enlarge-blob.opt
@@ -1,4 +1,3 @@
---innodb-file-per-table
--innodb-buffer-pool-size=32M
--innodb-page-size=64k
--innodb-strict-mode=OFF
diff --git a/mysql-test/suite/innodb/t/innodb-fkcheck.test b/mysql-test/suite/innodb/t/innodb-fkcheck.test
index 5ff3533fce1..bec4de7b670 100644
--- a/mysql-test/suite/innodb/t/innodb-fkcheck.test
+++ b/mysql-test/suite/innodb/t/innodb-fkcheck.test
@@ -4,17 +4,6 @@
#
# MDEV-10083: Orphan ibd file when playing with foreign keys
#
---disable_query_log
-SET @start_global_fpt = @@global.innodb_file_per_table;
-SET @start_global_fkc = @@global.foreign_key_checks;
---enable_query_log
-
-set global innodb_file_per_table = 1;
-
---disable_warnings
-drop table if exists b;
-drop database if exists bug_fk;
---enable_warnings
let $MYSQLD_DATADIR = `select @@datadir`;
@@ -114,12 +103,6 @@ show warnings;
#
# Cleanup
#
---disable_query_log
-SET @@global.innodb_file_per_table = @start_global_fpt;
-SET @@global.foreign_key_checks = @start_global_fkc;
---enable_query_log
-
---disable_warnings
-drop table if exists b;
-drop database if exists bug_fk;
---enable_warnings
+
+drop table b;
+drop database bug_fk;
diff --git a/mysql-test/suite/innodb/t/innodb-index-online.test b/mysql-test/suite/innodb/t/innodb-index-online.test
index ab4f5a965da..827941817f6 100644
--- a/mysql-test/suite/innodb/t/innodb-index-online.test
+++ b/mysql-test/suite/innodb/t/innodb-index-online.test
@@ -9,10 +9,6 @@ SELECT name, count FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE subsystem = 'ddl
call mtr.add_suppression("InnoDB: Warning: Small buffer pool size");
-# DISCARD TABLESPACE needs file-per-table
-SET @global_innodb_file_per_table_orig = @@global.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table = on;
-
# Save the initial number of concurrent sessions.
--source include/count_sessions.inc
@@ -515,7 +511,6 @@ SET DEBUG_SYNC = 'RESET';
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc
-SET GLOBAL innodb_file_per_table = @global_innodb_file_per_table_orig;
--disable_warnings
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
diff --git a/mysql-test/suite/innodb/t/innodb-table-online.test b/mysql-test/suite/innodb/t/innodb-table-online.test
index 45b1bc1ec8e..5f0562e11ed 100644
--- a/mysql-test/suite/innodb/t/innodb-table-online.test
+++ b/mysql-test/suite/innodb/t/innodb-table-online.test
@@ -11,10 +11,6 @@ call mtr.add_suppression("InnoDB: Warning: Small buffer pool size");
call mtr.add_suppression("InnoDB: Error: table 'test/t1'");
call mtr.add_suppression("MariaDB is trying to open a table handle but the .ibd file for");
-# DISCARD TABLESPACE needs file-per-table
-SET @global_innodb_file_per_table_orig = @@global.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table = on;
-
# Save the initial number of concurrent sessions.
--source include/count_sessions.inc
@@ -447,7 +443,6 @@ disconnect con1;
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc
-SET GLOBAL innodb_file_per_table = @global_innodb_file_per_table_orig;
--disable_warnings
SET GLOBAL innodb_monitor_enable = default;
SET GLOBAL innodb_monitor_disable = default;
diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test
index efa2b1ea403..e970bd842e2 100644
--- a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test
+++ b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test
@@ -35,7 +35,6 @@ FLUSH TABLES;
let MYSQLD_DATADIR =`SELECT @@datadir`;
let $strerrfix=/ (\(.+\))//;
-SET GLOBAL innodb_file_per_table = 1;
CREATE TABLE t1 (c1 INT) ENGINE = InnoDB;
INSERT INTO t1 VALUES(1),(2),(3);
@@ -52,8 +51,6 @@ ALTER TABLE t1 DISCARD TABLESPACE;
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table = 1;
-
CREATE TABLE t1 (c1 INT) ENGINE = InnoDB;
INSERT INTO t1 VALUES(1),(2),(3);
@@ -66,8 +63,6 @@ ALTER TABLE t1 DISCARD TABLESPACE;
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table = 1;
-
# Create the table that we will use for crash recovery (during IMPORT)
CREATE TABLE t1 (c1 INT) ENGINE = Innodb;
INSERT INTO t1 VALUES (1), (2), (3), (4);
@@ -130,9 +125,6 @@ EOF
DROP TABLE t1;
-SET @file_per_table= @@innodb_file_per_table;
-SET GLOBAL innodb_file_per_table = 1;
-
CREATE TABLE t1 (c1 INT) ENGINE = Innodb;
ALTER TABLE t1 DISCARD TABLESPACE;
@@ -960,21 +952,6 @@ do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
ib_restore_tablespaces("test", "t1");
EOF
-# Test failure after ibuf check
-SET SESSION debug_dbug="+d,ib_import_check_bitmap_failure";
-
-# Need proper mapping of error codes :-(
---error ER_NOT_KEYFILE
-ALTER TABLE t1 IMPORT TABLESPACE;
-
-SET SESSION debug_dbug=@saved_debug_dbug;
-
-# Restore files
-perl;
-do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
-ib_restore_tablespaces("test", "t1");
-EOF
-
# Test failure after adjusting the cluster index root page
SET SESSION debug_dbug="+d,ib_import_cluster_root_adjust_failure";
@@ -1397,5 +1374,3 @@ call mtr.add_suppression("Index for table 't1' is corrupt; try to repair it");
#cleanup
--remove_file $MYSQLTEST_VARDIR/tmp/t1.cfg
--remove_file $MYSQLTEST_VARDIR/tmp/t1.ibd
-
-SET GLOBAL INNODB_FILE_PER_TABLE=@file_per_table;
diff --git a/mysql-test/suite/innodb/t/innodb-wl5522.test b/mysql-test/suite/innodb/t/innodb-wl5522.test
index 19652d8e8fd..c5c61ded651 100644
--- a/mysql-test/suite/innodb/t/innodb-wl5522.test
+++ b/mysql-test/suite/innodb/t/innodb-wl5522.test
@@ -98,7 +98,6 @@ if ($checksum_algorithm == "strict_full_crc32") {
ALTER TABLE t2 IMPORT TABLESPACE;
DROP TABLE t2;
-SET GLOBAL innodb_file_per_table = 1;
SELECT @@innodb_file_per_table;
let MYSQLD_DATADIR =`SELECT @@datadir`;
@@ -114,7 +113,7 @@ ALTER TABLE t1 IMPORT TABLESPACE;
SELECT * FROM t1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
CREATE TABLE t1(
c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
c2 INT) ENGINE=InnoDB;
@@ -158,7 +157,7 @@ SELECT COUNT(*) FROM t1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
# Insert some more records to move the LSN forward and then drop the
# table and restore
CREATE TABLE t1(
@@ -204,7 +203,7 @@ SELECT COUNT(*) FROM t1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
# Insert some more records to move the LSN forward and then drop the
# table and restore, this time the table has a secondary index too.
CREATE TABLE t1(
@@ -245,7 +244,7 @@ SELECT COUNT(*) FROM t1 WHERE c2 = 1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
# Insert some more records to move the LSN forward and then drop the
# table and restore, this time the table has a secondary index too.
# Rename the index on the create so that the IMPORT fails, drop index
diff --git a/mysql-test/suite/innodb/t/innodb-wl5980-alter.test b/mysql-test/suite/innodb/t/innodb-wl5980-alter.test
index 09c54db644f..6627b77ed9c 100644
--- a/mysql-test/suite/innodb/t/innodb-wl5980-alter.test
+++ b/mysql-test/suite/innodb/t/innodb-wl5980-alter.test
@@ -6,7 +6,6 @@
--source include/have_innodb.inc
--source include/have_symlink.inc
-SET @innodb_file_per_table_orig=@@GLOBAL.innodb_file_per_table;
LET $regexp=/FTS_[0-9a-f_]+([A-Z0-9_]+)\.([islbd]{3})/FTS_AUX_\1.\2/;
# Set up some variables
@@ -14,7 +13,6 @@ LET $MYSQL_DATA_DIR = `select @@datadir`;
LET $data_directory_clause = DATA DIRECTORY='$MYSQL_TMP_DIR/alt_dir';
SET default_storage_engine=InnoDB;
-SET GLOBAL innodb_file_per_table=ON;
SET NAMES utf8mb4;
@@ -706,5 +704,3 @@ DROP TABLE tt, t1o, sys_tables, sys_indexes, sys_foreign;
--list_files $MYSQL_TMP_DIR/alt_dir/test
--rmdir $MYSQL_TMP_DIR/alt_dir/test
--rmdir $MYSQL_TMP_DIR/alt_dir
-
-SET GLOBAL innodb_file_per_table = @innodb_file_per_table_orig;
diff --git a/mysql-test/suite/innodb/t/innodb.opt b/mysql-test/suite/innodb/t/innodb.opt
deleted file mode 100644
index 59e43fea231..00000000000
--- a/mysql-test/suite/innodb/t/innodb.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb-defragment=0 \ No newline at end of file
diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test
index 58e9899bd40..9211a51dcfc 100644
--- a/mysql-test/suite/innodb/t/innodb.test
+++ b/mysql-test/suite/innodb/t/innodb.test
@@ -1106,7 +1106,13 @@ CREATE TABLE t2 (
) ENGINE=InnoDB;
INSERT INTO t2 VALUES("3524", "1"),("3525", "1"),("1794", "4"),("102", "5"),("1822", "6"),("3382", "9");
+# We have to analyze the tables to make the row count stable
+ANALYZE table t1,t2;
+explain SELECT t2.id, t1.`label` FROM t2 INNER JOIN
+(SELECT t1.id_object as id_object FROM t1 WHERE t1.`label` LIKE '%test%') AS lbl
+ON (t2.id = lbl.id_object) INNER JOIN t1 ON (t2.id = t1.id_object);
+--sorted_result
SELECT t2.id, t1.`label` FROM t2 INNER JOIN
(SELECT t1.id_object as id_object FROM t1 WHERE t1.`label` LIKE '%test%') AS lbl
ON (t2.id = lbl.id_object) INNER JOIN t1 ON (t2.id = t1.id_object);
diff --git a/mysql-test/suite/innodb/t/innodb_buffer_pool_load_now.test b/mysql-test/suite/innodb/t/innodb_buffer_pool_load_now.test
index baced6e9e11..c9e4dc25e7f 100644
--- a/mysql-test/suite/innodb/t/innodb_buffer_pool_load_now.test
+++ b/mysql-test/suite/innodb/t/innodb_buffer_pool_load_now.test
@@ -69,7 +69,7 @@ EOF
--move_file $file $file.now
-# Complete purge (and change buffer merge).
+# Complete purge.
SET GLOBAL innodb_fast_shutdown=0;
--source include/shutdown_mysqld.inc
diff --git a/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt b/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt
index 614dd9356f1..8434ff04c6c 100644
--- a/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt
+++ b/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt
@@ -1,4 +1,3 @@
---innodb_file_per_table=1
--loose-skip-stack-trace
--skip-core-file
--loose-innodb_buffer_pool_load_at_startup=OFF
diff --git a/mysql-test/suite/innodb/t/innodb_bug14147491.test b/mysql-test/suite/innodb/t/innodb_bug14147491.test
index 3c37f1b7cce..4f4810f84ae 100644
--- a/mysql-test/suite/innodb/t/innodb_bug14147491.test
+++ b/mysql-test/suite/innodb/t/innodb_bug14147491.test
@@ -21,8 +21,6 @@ SET GLOBAL innodb_fast_shutdown=0;
--echo # Create and populate the table to be corrupted
-set global innodb_file_per_table=ON;
-
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
INSERT INTO t1 (b) VALUES ('corrupt me');
--disable_query_log
diff --git a/mysql-test/suite/innodb/t/innodb_bug39438-master.opt b/mysql-test/suite/innodb/t/innodb_bug39438-master.opt
deleted file mode 100644
index 0746d13d1c0..00000000000
--- a/mysql-test/suite/innodb/t/innodb_bug39438-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---loose-innodb-file-per-table=1
diff --git a/mysql-test/suite/innodb/t/innodb_bug39438.test b/mysql-test/suite/innodb/t/innodb_bug39438.test
index 635d65f045b..04275197e13 100644
--- a/mysql-test/suite/innodb/t/innodb_bug39438.test
+++ b/mysql-test/suite/innodb/t/innodb_bug39438.test
@@ -3,10 +3,6 @@
# Bug#39438 Testcase for Bug#39436 crashes on 5.1 in fil_space_get_latch
# http://bugs.mysql.com/39438
#
-# This test must be run with innodb_file_per_table=1 because the crash
-# only occurs if that option is turned on and DISCARD TABLESPACE only
-# works with innodb_file_per_table.
-#
SET default_storage_engine=InnoDB;
diff --git a/mysql-test/suite/innodb/t/innodb_bug56947.test b/mysql-test/suite/innodb/t/innodb_bug56947.test
index ce64f1a8322..3e85e614812 100644
--- a/mysql-test/suite/innodb/t/innodb_bug56947.test
+++ b/mysql-test/suite/innodb/t/innodb_bug56947.test
@@ -4,7 +4,8 @@
-- source include/have_innodb.inc
-- source include/have_debug.inc
-SET GLOBAL innodb_file_per_table=0;
+SET @save_fpt=@@GLOBAL.innodb_file_per_table;
+SET GLOBAL innodb_file_per_table=OFF;
create table bug56947(a int not null) engine = innodb;
SET @saved_dbug = @@SESSION.debug_dbug;
@@ -15,5 +16,5 @@ alter table bug56947 add unique index (a);
check table bug56947;
drop table bug56947;
-SET @@global.innodb_file_per_table=DEFAULT;
+SET GLOBAL innodb_file_per_table=@save_fpt;
SET debug_dbug= @saved_dbug;
diff --git a/mysql-test/suite/innodb/t/innodb_bug59733.test b/mysql-test/suite/innodb/t/innodb_bug59733.test
deleted file mode 100644
index 0b1bff51932..00000000000
--- a/mysql-test/suite/innodb/t/innodb_bug59733.test
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Bug #59733 Possible deadlock when buffered changes are to be discarded
-# in buf_page_create
-#
--- source include/have_innodb.inc
-
--- disable_query_log
-# The flag innodb_change_buffering_debug is only available in debug builds.
-# It instructs InnoDB to try to evict pages from the buffer pool when
-# change buffering is possible, so that the change buffer will be used
-# whenever possible.
--- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
-SET @innodb_change_buffering_debug_orig = @@innodb_change_buffering_debug;
--- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
-SET GLOBAL innodb_change_buffering_debug = 1;
--- enable_query_log
-
-CREATE TABLE bug59733(a INT AUTO_INCREMENT PRIMARY KEY,b CHAR(1))ENGINE=InnoDB;
-
-# Create enough rows for the table, so that the insert buffer will be
-# used. There must be multiple index pages, because changes to the
-# root page are never buffered.
-
-INSERT INTO bug59733 VALUES(0,'x');
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-INSERT INTO bug59733 SELECT 0,b FROM bug59733;
-
-# Create the secondary index for which changes will be buffered.
-CREATE INDEX b ON bug59733 (b);
-
-# This should be buffered, if innodb_change_buffering_debug = 1 is in effect.
-DELETE FROM bug59733 WHERE (a%100)=0;
-
-# Drop the index in order to get free pages with orphaned buffered changes.
-DROP INDEX b ON bug59733;
-
-# Create the index and attempt to reuse pages for which buffered changes exist.
-CREATE INDEX b ON bug59733 (b);
-
-DROP TABLE bug59733;
-
--- disable_query_log
--- error 0, ER_UNKNOWN_SYSTEM_VARIABLE
-SET GLOBAL innodb_change_buffering_debug = @innodb_change_buffering_debug_orig;
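For context on the removed bug59733 script above: it seeds one row and then doubles the table with INSERT ... SELECT so that the secondary index spans several pages, because changes to an index root page are never buffered. A minimal Python sketch of that row arithmetic, assuming the eleven doublings and the (a % 100) = 0 delete shown in the removed lines:

  # Editorial sketch of the removed test's sizing; not part of the patch itself.
  seed_rows = 1
  doublings = 11                                  # eleven INSERT ... SELECT statements above
  total_rows = seed_rows * 2 ** doublings         # 2048 rows with AUTO_INCREMENT ids 1..2048
  deleted_rows = total_rows // 100                # 20 rows hit by DELETE ... WHERE (a % 100) = 0
  print(total_rows, deleted_rows)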
diff --git a/mysql-test/suite/innodb/t/innodb_bug68148.test b/mysql-test/suite/innodb/t/innodb_bug68148.test
index ab4e0311656..432e8fbf03e 100644
--- a/mysql-test/suite/innodb/t/innodb_bug68148.test
+++ b/mysql-test/suite/innodb/t/innodb_bug68148.test
@@ -6,8 +6,6 @@
# MDEV-8845: Table disappear after modifying FK
#
-set global innodb_file_per_table=1;
-
CREATE TABLE ref_table1 (id int(11) NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB;
CREATE TABLE ref_table2 (id int(11) NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test b/mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test
index d04dd59f7e7..75e011a5094 100644
--- a/mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test
+++ b/mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test
@@ -80,11 +80,6 @@ SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=default;
-
-# Test Compressed Table
-SET GLOBAL innodb_file_per_table=1;
-
CREATE TABLE t1(
class INT,
id INT,
@@ -143,6 +138,4 @@ SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=default;
-
DROP PROCEDURE populate_t1;
diff --git a/mysql-test/suite/innodb/t/innodb_defragment.opt b/mysql-test/suite/innodb/t/innodb_defragment.opt
index aea3d480c24..12b046be786 100644
--- a/mysql-test/suite/innodb/t/innodb_defragment.opt
+++ b/mysql-test/suite/innodb/t/innodb_defragment.opt
@@ -1,5 +1,4 @@
--loose-innodb-buffer-pool-stats
--loose-innodb-buffer-page
--loose-innodb-buffer-page-lru
---innodb-file-per-table
---innodb-defragment=1 \ No newline at end of file
+--innodb-defragment=1
diff --git a/mysql-test/suite/innodb/t/insert_debug.test b/mysql-test/suite/innodb/t/insert_debug.test
index c370c402ac7..7fe584a8ff7 100644
--- a/mysql-test/suite/innodb/t/insert_debug.test
+++ b/mysql-test/suite/innodb/t/insert_debug.test
@@ -7,13 +7,11 @@
--echo # CAUSES INFINITE PAGE SPLIT
--echo #
-SET GLOBAL innodb_change_buffering_debug=1;
SET GLOBAL innodb_limit_optimistic_insert_debug=1;
CREATE TABLE t1(c1 INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY HASH (c1) PARTITIONS 15;
DROP TABLE t1;
-SET GLOBAL innodb_change_buffering_debug=0;
SET GLOBAL innodb_limit_optimistic_insert_debug=0;
--echo #
diff --git a/mysql-test/suite/innodb/t/log_corruption.test b/mysql-test/suite/innodb/t/log_corruption.test
index 6f7080f5b50..f54aebf317e 100644
--- a/mysql-test/suite/innodb/t/log_corruption.test
+++ b/mysql-test/suite/innodb/t/log_corruption.test
@@ -57,16 +57,24 @@ my $head = pack("Nx[18]", 0);
my $body = pack("x[8]Nx[10]Nx[16312]", 768, 97937874);
my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck);
-# Dummy pages 1..6.
-$body = pack("x[16338]");
-for (my($page) = 1; $page < 7; $page++)
-{
- ## FIL_PAGE_OFFSET
- $head = pack("Nx[18]", $page);
- $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
- print OUT pack("N",$ck).$head.pack("x[16350]Nx[4]",$ck);
-}
+
+# Dummy change buffer header page (page 3).
+die unless seek(OUT, 3 * 16384, 0);
+## FIL_PAGE_OFFSET, FIL_PAGE_PREV, FIL_PAGE_NEXT, FIL_PAGE_TYPE
+my $head = pack("NNNx[8]n", 3, 0xffffffff, 0xffffffff, 6);
+my $body = pack("x[62]nnx[16272]", 2, 50);
+my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
+print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck);
+
+# Dummy change buffer root page (page 4).
+## FIL_PAGE_OFFSET, FIL_PAGE_PREV, FIL_PAGE_NEXT
+my $head = pack("NNNx[10]", 4, 0xffffffff, 0xffffffff);
+my $body = chr(0) x 16338;
+my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
+print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck);
+
# Dictionary header page (page 7).
+die unless seek(OUT, 7 * 16384, 0);
## FIL_PAGE_OFFSET
$head = pack("Nx[18]", 7);
## DICT_HDR_TABLES,DICT_HDR_INDEXES
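The Perl added above writes each dummy page as a leading checksum, the FIL header, 12 filler bytes, the page body, and a trailing copy of the same checksum padded out to 16 KiB, where the checksum is the XOR of the CRCs of the header and the body. A rough Python equivalent of the page-3 layout, using zlib's reflected CRC-32 as a stand-in for the test's mycrc32 helper (the real polynomial and parameters come from the surrounding script, so treat the checksum function here as an assumption):

  import struct, zlib

  def mycrc32(data: bytes, crc: int = 0) -> int:
      # Assumed stand-in for the Perl mycrc32 helper used by the test.
      return zlib.crc32(data, crc) & 0xFFFFFFFF

  # Dummy change buffer header page (page 3): FIL_PAGE_OFFSET, PREV, NEXT, TYPE=6.
  head = struct.pack(">IIIxxxxxxxxH", 3, 0xFFFFFFFF, 0xFFFFFFFF, 6)
  body = bytes(62) + struct.pack(">HH", 2, 50) + bytes(16272)
  ck = mycrc32(head) ^ mycrc32(body)
  page = struct.pack(">I", ck) + head + bytes(12) + body + struct.pack(">I", ck) + bytes(4)
  assert len(page) == 16384  # exactly one 16 KiB page, as in the Perl pack() calls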
diff --git a/mysql-test/suite/innodb/t/log_data_file_size.opt b/mysql-test/suite/innodb/t/log_data_file_size.opt
index d9a364a3287..20e2cf0dc40 100644
--- a/mysql-test/suite/innodb/t/log_data_file_size.opt
+++ b/mysql-test/suite/innodb/t/log_data_file_size.opt
@@ -1,2 +1,3 @@
--loose-innodb-sys-indexes
--innodb-data-file-path=ibdata1:1M:autoextend
+--innodb-undo-tablespaces=0
diff --git a/mysql-test/suite/innodb/t/log_file_name.test b/mysql-test/suite/innodb/t/log_file_name.test
index eb616d7f835..1945be89599 100644
--- a/mysql-test/suite/innodb/t/log_file_name.test
+++ b/mysql-test/suite/innodb/t/log_file_name.test
@@ -7,7 +7,6 @@
# Embedded server does not support crashing
--source include/not_embedded.inc
-SET GLOBAL innodb_file_per_table=ON;
FLUSH TABLES;
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/t/log_upgrade.test b/mysql-test/suite/innodb/t/log_upgrade.test
index faf88c41bef..a3d237875fe 100644
--- a/mysql-test/suite/innodb/t/log_upgrade.test
+++ b/mysql-test/suite/innodb/t/log_upgrade.test
@@ -45,9 +45,24 @@ my $head = pack("Nx[18]", 0);
my $body = pack("x[8]Nx[10]Nx[16312]", 768, 97937874);
my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck);
-# Dummy pages 1..6.
-print OUT chr(0) x (6 * 16384);
+
+# Dummy change buffer header page (page 3).
+die unless seek(OUT, 3 * 16384, 0);
+## FIL_PAGE_OFFSET, FIL_PAGE_PREV, FIL_PAGE_NEXT, FIL_PAGE_TYPE
+my $head = pack("NNNx[8]n", 3, 0xffffffff, 0xffffffff, 6);
+my $body = pack("x[62]nnx[16272]", 2, 50);
+my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
+print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck);
+
+# Dummy change buffer root page (page 4).
+## FIL_PAGE_OFFSET, FIL_PAGE_PREV, FIL_PAGE_NEXT
+my $head = pack("NNNx[10]", 4, 0xffffffff, 0xffffffff);
+my $body = chr(0) x 16338;
+my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial);
+print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck);
+
# Dictionary header page (page 7).
+die unless seek(OUT, 7 * 16384, 0);
## FIL_PAGE_OFFSET
$head = pack("Nx[18]", 7);
## DICT_HDR_TABLES,DICT_HDR_INDEXES
diff --git a/mysql-test/suite/innodb/t/mdev-14846.test b/mysql-test/suite/innodb/t/mdev-14846.test
index b1f32302591..a576d244007 100644
--- a/mysql-test/suite/innodb/t/mdev-14846.test
+++ b/mysql-test/suite/innodb/t/mdev-14846.test
@@ -39,7 +39,9 @@ SET DEBUG_SYNC='now SIGNAL con2_dml';
--connection default
SET DEBUG_SYNC='now WAIT_FOR default_dml';
---send UPDATE t3 AS alias1 LEFT JOIN t3 AS alias2 ON ( alias1.f1 <> alias1.f2 ) SET alias1.f3 = 59 WHERE ( EXISTS ( SELECT t1.f3 FROM t1 WHERE t1.f1 = alias1.f1 ) ) OR alias2.f1 = 'h'
+--replace_column 9 #
+explain UPDATE t3 AS alias1 LEFT JOIN t3 AS alias2 ON ( alias1.f1 <> alias1.f2 ) SET alias1.f3 = 59 WHERE ( EXISTS ( SELECT t1.f3 FROM t1 IGNORE INDEX (f1) WHERE t1.f1 = alias1.f1 ) ) OR alias2.f1 = 'h';
+--send UPDATE t3 AS alias1 LEFT JOIN t3 AS alias2 ON ( alias1.f1 <> alias1.f2 ) SET alias1.f3 = 59 WHERE ( EXISTS ( SELECT t1.f3 FROM t1 IGNORE INDEX (f1) WHERE t1.f1 = alias1.f1 ) ) OR alias2.f1 = 'h'
# It holds the lock of all record in t3 and tries to acquire record lock for the table t1.
--connect (con2,localhost,root,,test)
@@ -53,6 +55,8 @@ disconnect con2;
# Cleanup
--connection con1
SET DEBUG_SYNC='now WAIT_FOR con1_dml2';
+--replace_column 9 #
+explain UPDATE v4, t1 SET t1.pk = 76 WHERE t1.f2 IN ( SELECT t2.f FROM t2 INNER JOIN t3 );
UPDATE v4, t1 SET t1.pk = 76 WHERE t1.f2 IN ( SELECT t2.f FROM t2 INNER JOIN t3 );
# It holds the record lock on table t1 and tries to acquire record lock on t3.
# leads to deadlock (con1 trx is waiting for default trx and vice versa)
diff --git a/mysql-test/suite/innodb/t/mdev-15707.opt b/mysql-test/suite/innodb/t/mdev-15707.opt
deleted file mode 100644
index fec3463c6d0..00000000000
--- a/mysql-test/suite/innodb/t/mdev-15707.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb --innodb-buffer-pool-size=5MB --innodb-read-io-threads=1 --innodb-doublewrite=0 --innodb-flush-log-at-trx-commit=0 \ No newline at end of file
diff --git a/mysql-test/suite/innodb/t/mdev-15707.test b/mysql-test/suite/innodb/t/mdev-15707.test
deleted file mode 100644
index a0446ef4f81..00000000000
--- a/mysql-test/suite/innodb/t/mdev-15707.test
+++ /dev/null
@@ -1,30 +0,0 @@
---source include/windows.inc
-# This test is slow on buildbot.
---source include/big_test.inc
-
-# Deadlock in conjunction with the innodb change buffering.
-
-# When innodb change buffering kicks in, i.e secondary non-unique index
-# does not fit into the bufferpool, then, on Windows, innodb
-# background threads could deadlock whenever index page is
-# read, and the page needs load/merge change buffer.
-# The test tries to reproduce this situation, by creating index
-# that does not fit into bufferpool, and doing a large update.
-
-CREATE TABLE t1(
-a INT AUTO_INCREMENT PRIMARY KEY,
-b CHAR(255),
-INDEX(b))
-ENGINE=InnoDB;
-
-INSERT INTO t1(b) SELECT UUID();
-BEGIN;
-let $i=`select cast(log2(@@innodb_buffer_pool_size/255) as int)`;
-while ($i)
-{
- INSERT INTO t1(b) SELECT UUID() FROM t1;
- dec $i;
-}
-COMMIT;
-UPDATE t1 SET b=UUID();
-DROP TABLE t1; \ No newline at end of file
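The removed mdev-15707 test sized its loop so that the secondary index on the CHAR(255) column outgrows the buffer pool, forcing change-buffer loads and merges on page reads. A small Python sketch of that sizing, assuming the 5MB pool from the deleted .opt file:

  import math

  # Editorial sketch of the removed loop bound; not part of the patch itself.
  buffer_pool_bytes = 5 * 1024 * 1024   # assumption: --innodb-buffer-pool-size=5MB from the deleted .opt
  row_payload = 255                     # the CHAR(255) column indexed by the removed test
  doublings = int(math.log2(buffer_pool_bytes / row_payload))  # mirrors cast(log2(...) as int)
  print(doublings)                      # 14 doublings of INSERT INTO t1(b) SELECT UUID() FROM t1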
diff --git a/mysql-test/suite/innodb/t/monitor.test b/mysql-test/suite/innodb/t/monitor.test
index 9b093d41892..7d41a5507e8 100644
--- a/mysql-test/suite/innodb/t/monitor.test
+++ b/mysql-test/suite/innodb/t/monitor.test
@@ -258,7 +258,7 @@ drop table monitor_test;
set global innodb_monitor_enable = file_num_open_files;
-# Counters are unpredictable when innodb-file-per-table is on
+# Counters are unpredictable
--replace_column 2 # 3 # 4 # 5 # 6 # 7 #
select name, max_count, min_count, count,
max_count_reset, min_count_reset, count_reset,
diff --git a/mysql-test/suite/innodb/t/mvcc.test b/mysql-test/suite/innodb/t/mvcc.test
index 7c37718c28a..e0baf7f83d3 100644
--- a/mysql-test/suite/innodb/t/mvcc.test
+++ b/mysql-test/suite/innodb/t/mvcc.test
@@ -1,9 +1,6 @@
--source include/have_innodb.inc
--source include/have_sequence.inc
-SET @save_per_table= @@GLOBAL.innodb_file_per_table;
-SET GLOBAL innodb_file_per_table= 1;
-
let MYSQLD_DATADIR =`SELECT @@datadir`;
--echo #
@@ -68,5 +65,3 @@ DELIMITER ;$$
INSERT INTO t1 SET id=1,c294=1;
REPLACE t1 SET id=1,c294=1;
DROP TABLE t1;
-
-SET GLOBAL innodb_file_per_table= @save_per_table;
diff --git a/mysql-test/suite/innodb/t/partition_locking.test b/mysql-test/suite/innodb/t/partition_locking.test
index 13457c1d9be..c4e6bff9bbb 100644
--- a/mysql-test/suite/innodb/t/partition_locking.test
+++ b/mysql-test/suite/innodb/t/partition_locking.test
@@ -104,7 +104,7 @@ SELECT * FROM t1 LOCK IN SHARE MODE;
--error ER_LOCK_WAIT_TIMEOUT
SELECT * FROM t1 LOCK IN SHARE MODE NOWAIT;
SELECT * FROM t1 ORDER BY d LOCK IN SHARE MODE SKIP LOCKED;
-SELECT a,count(b) FROM t1 GROUP BY a ORDER BY a LOCK IN SHARE MODE SKIP LOCKED;
+SELECT a,count(b) FROM t1 force index (a) GROUP BY a ORDER BY a LOCK IN SHARE MODE SKIP LOCKED;
SELECT d,a,b,c FROM t1 partition (p1,p9,p11,p17) ORDER BY d
LOCK IN SHARE MODE SKIP LOCKED;
SELECT d,a,b,c FROM t1 ORDER BY d LOCK IN SHARE MODE SKIP LOCKED;
diff --git a/mysql-test/suite/innodb/t/row_format_redundant.opt b/mysql-test/suite/innodb/t/row_format_redundant.opt
index c44c611ed60..3147bad4713 100644
--- a/mysql-test/suite/innodb/t/row_format_redundant.opt
+++ b/mysql-test/suite/innodb/t/row_format_redundant.opt
@@ -1 +1,2 @@
--innodb-checksum-algorithm=crc32
+--innodb-undo-tablespaces=0
diff --git a/mysql-test/suite/innodb/t/row_format_redundant.test b/mysql-test/suite/innodb/t/row_format_redundant.test
index 6de7597e983..6b5a559fc18 100644
--- a/mysql-test/suite/innodb/t/row_format_redundant.test
+++ b/mysql-test/suite/innodb/t/row_format_redundant.test
@@ -26,14 +26,11 @@ let bugdir= $MYSQLTEST_VARDIR/tmp/row_format_redundant;
SET GLOBAL innodb_fast_shutdown=0;
--source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table=1;
-
--echo #
--echo # Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE ==
--echo # MTR_LOG_NO_REDO
--echo #
-SET GLOBAL innodb_file_per_table=ON;
create table t1 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156)) engine=InnoDB
row_format=redundant;
@@ -61,9 +58,6 @@ row_format=redundant;
insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa');
insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa');
-# read-only restart requires the change buffer to be empty; therefore we
-# do a slow shutdown.
-SET GLOBAL innodb_fast_shutdown=0;
--let $restart_parameters= $d --innodb-read-only
--source include/restart_mysqld.inc
@@ -150,7 +144,7 @@ RENAME TABLE t1 TO tee_one;
DROP TABLE t1;
DROP TABLE t2,t3;
---let SEARCH_PATTERN= \[ERROR\] InnoDB: Table test/t1 in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=511\b
+--let SEARCH_PATTERN= \[ERROR\] InnoDB: Table test/t1 in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=511\b.*
--source include/search_pattern_in_file.inc
--let $restart_parameters=
diff --git a/mysql-test/suite/innodb/t/table_flags.opt b/mysql-test/suite/innodb/t/table_flags.opt
index bca674950d2..3147bad4713 100644
--- a/mysql-test/suite/innodb/t/table_flags.opt
+++ b/mysql-test/suite/innodb/t/table_flags.opt
@@ -1,2 +1,2 @@
--innodb-checksum-algorithm=crc32
---skip-innodb-read-only-compressed
+--innodb-undo-tablespaces=0
diff --git a/mysql-test/suite/innodb/t/table_flags.test b/mysql-test/suite/innodb/t/table_flags.test
index 6b2e6ee3ea6..1c3efa1fbba 100644
--- a/mysql-test/suite/innodb/t/table_flags.test
+++ b/mysql-test/suite/innodb/t/table_flags.test
@@ -40,7 +40,6 @@ let bugdir= $MYSQLTEST_VARDIR/tmp/table_flags;
--let $restart_parameters=$d --innodb-stats-persistent=0
--source include/restart_mysqld.inc
-SET GLOBAL innodb_file_per_table=1;
CREATE TABLE tr(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
CREATE TABLE tc(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPACT;
CREATE TABLE td(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
diff --git a/mysql-test/suite/innodb_fts/r/crash_recovery.result b/mysql-test/suite/innodb_fts/r/crash_recovery.result
index 83e5ddaea90..52a58c7034d 100644
--- a/mysql-test/suite/innodb_fts/r/crash_recovery.result
+++ b/mysql-test/suite/innodb_fts/r/crash_recovery.result
@@ -161,6 +161,8 @@ DROP TABLE mdev19073, mdev19073_2;
# MDEV-28706 Redundant InnoDB table fails during alter
#
SET @@global.innodb_file_per_table = 0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1 (
col_int INTEGER, col_text TEXT,
col_text_1 TEXT
diff --git a/mysql-test/suite/innodb_fts/r/fulltext.result b/mysql-test/suite/innodb_fts/r/fulltext.result
index 32dc87daa04..2a6fb81f4bb 100644
--- a/mysql-test/suite/innodb_fts/r/fulltext.result
+++ b/mysql-test/suite/innodb_fts/r/fulltext.result
@@ -537,6 +537,9 @@ CREATE TABLE t2 (a int, b2 char(10), FULLTEXT KEY b2 (b2)) ENGINE = InnoDB;
INSERT INTO t2 VALUES (1,'Scargill');
CREATE TABLE t3 (a int, b int) ENGINE = InnoDB;
INSERT INTO t3 VALUES (1,1), (2,1);
+SELECT * FROM t2 where MATCH(b2) AGAINST('scargill' IN BOOLEAN MODE);
+a b2
+1 Scargill
# t2 should use full text index
EXPLAIN
SELECT count(*) FROM t1 WHERE
@@ -546,8 +549,8 @@ WHERE t3.a=t1.a AND MATCH(b2) AGAINST('scargill' IN BOOLEAN MODE)
);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t2 fulltext b2 b2 0 1 Using where
-2 MATERIALIZED t3 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY t2 fulltext b2 b2 0 1 Using where
+2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 Using where
# should return 0
SELECT count(*) FROM t1 WHERE
not exists(
@@ -721,6 +724,8 @@ DROP TABLE t1;
#
SET @save = @@global.innodb_file_per_table;
SET @@global.innodb_file_per_table = 0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1 (
col_int INTEGER, col_text TEXT,
col_int_g INTEGER GENERATED ALWAYS AS (col_int)
@@ -730,6 +735,8 @@ ALTER TABLE t1 DROP KEY `ftidx` ;
INSERT INTO t1 (col_int, col_text) VALUES ( 1255, NULL);
DROP TABLE t1;
SET @@global.innodb_file_per_table = @save;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
#
# MDEV-20797 FULLTEXT search with apostrophe,
# and mandatory words
diff --git a/mysql-test/suite/innodb_fts/r/fulltext_misc.result b/mysql-test/suite/innodb_fts/r/fulltext_misc.result
index 69812ff8b72..3aa72dd5623 100644
--- a/mysql-test/suite/innodb_fts/r/fulltext_misc.result
+++ b/mysql-test/suite/innodb_fts/r/fulltext_misc.result
@@ -8,9 +8,9 @@ EXPLAIN SELECT 1 FROM t1
WHERE 1 > ALL((SELECT 1 FROM t1 JOIN t1 a ON (MATCH(t1.f1) AGAINST (""))
WHERE t1.f1 GROUP BY t1.f1));
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL f1_2 8 NULL 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1
2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
-2 SUBQUERY a index NULL f1_2 8 NULL 1 Using index
+2 SUBQUERY a ALL NULL NULL NULL NULL 1
PREPARE stmt FROM
'EXPLAIN SELECT 1 FROM t1
WHERE 1 > ALL((SELECT 1 FROM t1 RIGHT OUTER JOIN t1 a
@@ -18,14 +18,14 @@ PREPARE stmt FROM
WHERE t1.f1 GROUP BY t1.f1))';
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL f1_2 8 NULL 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1
2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
-2 SUBQUERY a index NULL f1_2 8 NULL 1 Using index
+2 SUBQUERY a ALL NULL NULL NULL NULL 1
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL f1_2 8 NULL 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1
2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
-2 SUBQUERY a index NULL f1_2 8 NULL 1 Using index
+2 SUBQUERY a ALL NULL NULL NULL NULL 1
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM
'EXPLAIN SELECT 1 FROM t1
@@ -34,14 +34,14 @@ PREPARE stmt FROM
WHERE t1.f1 GROUP BY t1.f1))';
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL f1_2 8 NULL 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1
2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
-2 SUBQUERY a index NULL f1_2 8 NULL 1 Using index
+2 SUBQUERY a ALL NULL NULL NULL NULL 1
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL f1_2 8 NULL 1 Using index
+1 PRIMARY t1 ALL NULL NULL NULL NULL 1
2 SUBQUERY t1 fulltext f1_2,f1 f1 0 1 Using where
-2 SUBQUERY a index NULL f1_2 8 NULL 1 Using index
+2 SUBQUERY a ALL NULL NULL NULL NULL 1
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
drop table if exists t1;
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
index 52cbede7314..6d7a5647a63 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
@@ -466,7 +466,8 @@ ROLLBACK;
SELECT * FROM t2 WHERE MATCH(s2) AGAINST ('Lollipops');
s1 s2
DROP TABLE t2 , t1;
-set global innodb_file_per_table=1;
+SET @save_innodb_read_only_compressed=@@GLOBAL.innodb_read_only_compressed;
+SET GLOBAL innodb_read_only_compressed=OFF;
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
a VARCHAR(200),
@@ -649,7 +650,7 @@ id a b
6 MYSQL SECURITY when configured properly, mysql ...
7 TEST QUERY EXPANSION for database ...
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=1;
+SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
a VARCHAR(200),
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result b/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result
index d67981e0851..0cbe1090e89 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_proximity.result
@@ -1,4 +1,3 @@
-drop table if exists t1;
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
a VARCHAR(200),
@@ -133,7 +132,6 @@ AGAINST ('"xyz blob"@3' IN BOOLEAN MODE);
count(*)
2
DROP TABLE t1;
-set global innodb_file_per_table=1;
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
a TEXT,
@@ -214,4 +212,3 @@ AGAINST ('"very blob"@3' IN BOOLEAN MODE);
count(*)
1
DROP TABLE t1;
-SET GLOBAL innodb_file_per_table=1;
diff --git a/mysql-test/suite/innodb_fts/t/fulltext.test b/mysql-test/suite/innodb_fts/t/fulltext.test
index 3ddc1856b7f..d9bf14cbe6b 100644
--- a/mysql-test/suite/innodb_fts/t/fulltext.test
+++ b/mysql-test/suite/innodb_fts/t/fulltext.test
@@ -570,6 +570,8 @@ INSERT INTO t2 VALUES (1,'Scargill');
CREATE TABLE t3 (a int, b int) ENGINE = InnoDB;
INSERT INTO t3 VALUES (1,1), (2,1);
+SELECT * FROM t2 where MATCH(b2) AGAINST('scargill' IN BOOLEAN MODE);
+
--echo # t2 should use full text index
EXPLAIN
SELECT count(*) FROM t1 WHERE
diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
index 4eaf5b2e0bd..c0836372e9e 100644
--- a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
+++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
@@ -427,15 +427,9 @@ DROP TABLE t2 , t1;
# FTS index with compressed row format
#------------------------------------------------------------------------------
-# Save innodb variables
-let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
-
-set global innodb_file_per_table=1;
-
---disable_query_log
SET @save_innodb_read_only_compressed=@@GLOBAL.innodb_read_only_compressed;
SET GLOBAL innodb_read_only_compressed=OFF;
---enable_query_log
+
# Create FTS table
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
@@ -583,10 +577,7 @@ SELECT * FROM t1 WHERE MATCH (a,b)
SELECT * FROM t1 ORDER BY id;
DROP TABLE t1;
---disable_query_log
SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
---enable_query_log
-eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
#------------------------------------------------------------------------------
# FTS index with utf8 character testcase
diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_proximity.test b/mysql-test/suite/innodb_fts/t/innodb_fts_proximity.test
index e3d8eb0c13b..25ad4f0355f 100644
--- a/mysql-test/suite/innodb_fts/t/innodb_fts_proximity.test
+++ b/mysql-test/suite/innodb_fts/t/innodb_fts_proximity.test
@@ -5,14 +5,6 @@
# Functional testing with FTS proximity search using '@'
# and try search default words
---disable_warnings
-drop table if exists t1;
---enable_warnings
-
---disable_query_log
-let $innodb_file_per_table_orig = `select @@innodb_file_per_table`;
---enable_query_log
-
# Create FTS table
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
@@ -161,8 +153,6 @@ SELECT count(*) FROM t1
DROP TABLE t1;
-set global innodb_file_per_table=1;
-
# Test fts with externally stored long column
CREATE TABLE t1 (
id INT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
@@ -257,5 +247,3 @@ SELECT count(*) FROM t1
AGAINST ('"very blob"@3' IN BOOLEAN MODE);
DROP TABLE t1;
-
-eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
diff --git a/mysql-test/suite/innodb_gis/r/create_spatial_index.result b/mysql-test/suite/innodb_gis/r/create_spatial_index.result
index d3c69294c10..30b41b41e62 100644
--- a/mysql-test/suite/innodb_gis/r/create_spatial_index.result
+++ b/mysql-test/suite/innodb_gis/r/create_spatial_index.result
@@ -57,10 +57,14 @@ ANALYZE TABLE tab;
Table Op Msg_type Msg_text
test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
+# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
-EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
+EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1+0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -74,7 +78,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 300 300,400 400)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
4 POLYGON((300 300,400 400,500 500,300 500,300 400,300 300))
@@ -85,10 +89,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRWithin
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -99,10 +104,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the ST_Crosses
SET @g1 = ST_GeomFromText('POLYGON((100 200,200 300,400 500,500 300,300 200,100 300,100 200))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
4 POLYGON((300 300,400 400,500 500,300 500,300 400,300 300))
@@ -116,7 +122,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 10 10,30 30,40 40)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_CRosses(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -127,6 +133,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -145,14 +152,15 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
+# Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
10 POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))
@@ -166,7 +174,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -182,7 +190,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 30 30,40 40,50 50)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -194,10 +202,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the Overlaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -218,14 +227,15 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where
+# Test the ST_Touches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
2 POLYGON((40 50,40 70,50 100,70 100,80 80,70 50,40 50))
@@ -239,7 +249,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 100 100,200 200,300 300)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
4 POLYGON((300 300,400 400,500 500,300 500,300 400,300 300))
@@ -250,10 +260,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE ST_Touches(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -267,7 +278,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -278,6 +289,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -296,14 +308,15 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
+# Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
10 POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))
@@ -314,10 +327,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -333,7 +347,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 30 30,40 40,50 50)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -345,10 +359,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBROverlaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -359,10 +374,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRTouches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
2 POLYGON((40 50,40 70,50 100,70 100,80 80,70 50,40 50))
@@ -373,6 +389,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRTouches(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where
+# Test with Procedure
CREATE PROCEDURE proc_wl6968()
BEGIN
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
@@ -383,11 +400,12 @@ EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
END |
CALL proc_wl6968();
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the Delete & Update
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
@@ -486,6 +504,7 @@ ANALYZE TABLE tab;
Table Op Msg_type Msg_text
test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
+# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -514,6 +533,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRWithin
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -528,6 +548,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the ST_Crosses
SET @g1 = ST_GeomFromText('POLYGON((100 200,200 300,400 500,500 300,300 200,100 300,100 200))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -556,10 +577,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where; Using filesort
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where; Using filesort
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -574,10 +596,11 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
+# Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -592,6 +615,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -623,6 +647,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the Overlaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -640,17 +665,18 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING(7 1,30 30,1010 3010,1010 2010,3010 3010,4010 4010,5010 5010 )');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where; Using filesort
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where; Using filesort
SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where
+# Test the ST_Touches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -679,6 +705,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE ST_Touches(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -707,10 +734,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where; Using filesort
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where; Using filesort
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -725,10 +753,11 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
+# Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -743,6 +772,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -774,6 +804,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBROverelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -788,6 +819,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRTouches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -802,6 +834,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRTouches(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where
+# Test with Procedure
CREATE PROCEDURE proc_wl6968()
BEGIN
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
@@ -817,6 +850,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the Delete & Update
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
@@ -904,10 +938,11 @@ ANALYZE TABLE tab;
Table Op Msg_type Msg_text
test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
+# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -921,7 +956,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 300 300,400 400)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
4 POLYGON((300 300,400 400,500 500,300 500,300 400,300 300))
@@ -932,10 +967,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRWithin
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -946,10 +982,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the ST_Crosses
SET @g1 = ST_GeomFromText('POLYGON((100 200,200 300,400 500,500 300,300 200,100 300,100 200))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
4 POLYGON((300 300,400 400,500 500,300 500,300 400,300 300))
@@ -963,7 +1000,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 10 10,30 30,40 40)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_CRosses(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -974,6 +1011,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -992,14 +1030,15 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
+# Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
10 POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))
@@ -1010,10 +1049,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -1029,7 +1069,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 30 30,40 40,50 50)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -1041,10 +1081,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the Overelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -1065,14 +1106,15 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 8 Using where
+# Test the ST_Touches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
2 POLYGON((40 50,40 70,50 100,70 100,80 80,70 50,40 50))
@@ -1086,7 +1128,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 100 100,200 200,300 300)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
4 POLYGON((300 300,400 400,500 500,300 500,300 400,300 300))
@@ -1097,10 +1139,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE ST_Touches(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -1114,7 +1157,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -1125,6 +1168,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1143,14 +1187,15 @@ c1 ST_Astext(c4)
EXPLAIN UPDATE tab SET C2 = ST_GeomFromText('POINT(0 0)')
WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab ALL idx3 NULL NULL NULL 10 Using where
+1 SIMPLE tab range idx3 idx3 34 NULL 9 Using where
+# Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
10 POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))
@@ -1161,10 +1206,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -1180,7 +1226,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SET @g1 = ST_GeomFromText('LINESTRING( 30 30,40 40,50 50)');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
1 POLYGON((30 30,40 40,50 50,30 50,30 40,30 30))
@@ -1192,10 +1238,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBROverelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
3 POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))
@@ -1206,10 +1253,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 1 Using where
+# Test the MBRTouches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where; Using filesort
+1 SIMPLE tab index idx3 PRIMARY 4 NULL 10 Using where
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
2 POLYGON((40 50,40 70,50 100,70 100,80 80,70 50,40 50))
@@ -1220,6 +1268,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN DELETE FROM tab WHERE MBRTouches(tab.c4, @g1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE tab range idx3 idx3 34 NULL 2 Using where
+# Test the Delete & Update
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
c1 ST_Astext(c4)
@@ -1243,6 +1292,7 @@ CHECK TABLE tab;
Table Op Msg_type Msg_text
test.tab check status OK
DROP TABLE tab;
+# Test check constraint on spatial column
CREATE TABLE tab(c1 POINT NOT NULL,CONSTRAINT tab_const check(c1 > 0) ) ENGINE=InnoDB;
ERROR HY000: Illegal parameter data types point and int for operation '>'
CREATE TABLE tab(c1 POINT NOT NULL,CONSTRAINT tab_const check(CAST(c1 AS BINARY) > 0) ) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb_gis/r/rtree.result b/mysql-test/suite/innodb_gis/r/rtree.result
index 2fd39f9ca32..9ddcb841cbc 100644
--- a/mysql-test/suite/innodb_gis/r/rtree.result
+++ b/mysql-test/suite/innodb_gis/r/rtree.result
@@ -11,11 +11,11 @@ test.t1 analyze status OK
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
explain select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL g NULL NULL NULL 5 Using where
+1 SIMPLE t1 range g g 34 NULL 5 Using where
select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
ST_astext(t1.g)
-POINT(1 1)
POINT(1.5 1.5)
+POINT(1 1)
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
delete from t1 where MBRWithin(t1.g, @g1);
check table t1;
@@ -157,11 +157,11 @@ test.t1 analyze status OK
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
explain select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL g NULL NULL NULL 5 Using where
+1 SIMPLE t1 range g g 34 NULL 5 Using where
select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
ST_astext(t1.g)
-POINT(1 1)
POINT(1.5 1.5)
+POINT(1 1)
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
delete from t1 where MBRWithin(t1.g, @g1);
check table t1;
diff --git a/mysql-test/suite/innodb_gis/r/rtree_multi_pk.result b/mysql-test/suite/innodb_gis/r/rtree_multi_pk.result
index 3e6464df997..bab710a5559 100644
--- a/mysql-test/suite/innodb_gis/r/rtree_multi_pk.result
+++ b/mysql-test/suite/innodb_gis/r/rtree_multi_pk.result
@@ -11,11 +11,11 @@ test.t1 analyze status OK
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
explain select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL g NULL NULL NULL 5 Using where
+1 SIMPLE t1 range g g 34 NULL 5 Using where
select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
ST_astext(t1.g)
-POINT(1 1)
POINT(1.5 1.5)
+POINT(1 1)
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
delete from t1 where MBRWithin(t1.g, @g1);
check table t1;
@@ -63,10 +63,10 @@ name ST_AsText(square)
small POLYGON((0 0,0 1,1 1,1 0,0 0))
SELECT name, ST_AsText(square) from t1 where MBRDisjoint(@p, square);
name ST_AsText(square)
-up3 POLYGON((0 3,0 5,2 5,2 3,0 3))
down3 POLYGON((0 -3,0 -1,2 -1,2 -3,0 -3))
-right3 POLYGON((3 0,3 2,5 2,5 0,3 0))
left3 POLYGON((-3 0,-3 2,-1 2,-1 0,-3 0))
+right3 POLYGON((3 0,3 2,5 2,5 0,3 0))
+up3 POLYGON((0 3,0 5,2 5,2 3,0 3))
SELECT name, ST_AsText(square) from t1 where MBREquals(@p, square);
name ST_AsText(square)
SELECT name, ST_AsText(square) from t1 where MBRIntersects(@p, square);
diff --git a/mysql-test/suite/innodb_gis/t/create_spatial_index.test b/mysql-test/suite/innodb_gis/t/create_spatial_index.test
index 5278292b56c..178041d0414 100644
--- a/mysql-test/suite/innodb_gis/t/create_spatial_index.test
+++ b/mysql-test/suite/innodb_gis/t/create_spatial_index.test
@@ -94,9 +94,11 @@ ANALYZE TABLE tab;
# Check the spatial relationship between 2 GIS shapes
-# Test the MBRContains
+--echo # Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
+# Show plan if we cannot use index order
+EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1+0;
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -117,7 +119,7 @@ WHERE MBRContains(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
-# Test the MBRWithin
+--echo # Test the MBRWithin
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
@@ -129,7 +131,7 @@ WHERE MBRWithin(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
-# Test the ST_Crosses
+--echo # Test the ST_Crosses
SET @g1 = ST_GeomFromText('POLYGON((100 200,200 300,400 500,500 300,300 200,100 300,100 200))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
@@ -152,7 +154,7 @@ WHERE ST_Crosses(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
-# Test the MBRDisjoint
+--echo # Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
@@ -164,7 +166,7 @@ WHERE MBRDisjoint(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
-# Test the MBREquals
+--echo # Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -176,7 +178,6 @@ WHERE MBREquals(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
-# Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
@@ -199,7 +200,7 @@ WHERE MBRintersects(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
-# Test the Overelaps
+--echo # Test the Overelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
@@ -222,7 +223,7 @@ WHERE MBROverlaps(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
-# Test the ST_Touches
+--echo # Test the ST_Touches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
@@ -245,7 +246,7 @@ WHERE ST_Touches(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE ST_Touches(tab.c4, @g1);
-# Test the MBRContains
+--echo # Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -268,7 +269,7 @@ WHERE MBRWithin(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
-# Test the MBRDisjoint
+--echo # Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
@@ -280,7 +281,7 @@ WHERE MBRDisjoint(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
-# Test the MBREquals
+--echo # Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -292,7 +293,7 @@ WHERE MBREquals(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
-# Test the MBRintersects
+--echo # Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
@@ -315,7 +316,7 @@ WHERE MBRintersects(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
-# Test the MBROverelaps
+--echo # Test the MBROverelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
@@ -327,7 +328,7 @@ WHERE MBROverlaps(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
-# Test the MBRTouches
+--echo # Test the MBRTouches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
@@ -339,7 +340,7 @@ WHERE MBRTouches(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRTouches(tab.c4, @g1);
-# Test with Procedure
+--echo # Test with Procedure
delimiter |;
CREATE PROCEDURE proc_wl6968()
@@ -357,7 +358,7 @@ delimiter ;|
CALL proc_wl6968();
-# Test the Delete & Update
+--echo # Test the Delete & Update
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -478,7 +479,7 @@ ANALYZE TABLE tab;
# Check the spatial relationship between 2 GIS shapes
-# Test the MBRContains
+--echo # Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -501,7 +502,7 @@ WHERE MBRContains(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
-# Test the MBRWithin
+--echo # Test the MBRWithin
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
@@ -513,7 +514,7 @@ WHERE MBRWithin(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
-# Test the ST_Crosses
+--echo # Test the ST_Crosses
SET @g1 = ST_GeomFromText('POLYGON((100 200,200 300,400 500,500 300,300 200,100 300,100 200))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
@@ -536,7 +537,7 @@ WHERE ST_Crosses(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
-# Test the MBRDisjoint
+--echo # Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
@@ -548,7 +549,7 @@ WHERE MBRDisjoint(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
-# Test the MBREquals
+--echo # Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -560,7 +561,7 @@ WHERE MBREquals(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
-# Test the MBRintersects
+--echo # Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
@@ -583,7 +584,7 @@ WHERE MBRintersects(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
-# Test the Overelaps
+--echo # Test the Overelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
@@ -606,7 +607,7 @@ WHERE MBROverlaps(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
-# Test the ST_Touches
+--echo # Test the ST_Touches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
@@ -629,7 +630,7 @@ WHERE ST_Touches(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE ST_Touches(tab.c4, @g1);
-# Test the MBRContains
+--echo # Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -652,7 +653,7 @@ WHERE MBRWithin(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
-# Test the MBRDisjoint
+--echo # Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
@@ -664,7 +665,7 @@ WHERE MBRDisjoint(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
-# Test the MBREquals
+--echo # Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -676,7 +677,7 @@ WHERE MBREquals(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
-# Test the MBRintersects
+--echo # Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
@@ -699,7 +700,7 @@ WHERE MBRintersects(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
-# Test the MBROverelaps
+--echo # Test the MBROverelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
@@ -711,7 +712,7 @@ WHERE MBROverlaps(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
-# Test the MBRTouches
+--echo # Test the MBRTouches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
@@ -723,7 +724,7 @@ WHERE MBRTouches(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRTouches(tab.c4, @g1);
-# Test with Procedure
+--echo # Test with Procedure
delimiter |;
CREATE PROCEDURE proc_wl6968()
@@ -741,7 +742,7 @@ delimiter ;|
CALL proc_wl6968();
-# Test the Delete & Update
+--echo # Test the Delete & Update
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
@@ -850,7 +851,7 @@ ANALYZE TABLE tab;
# Check the spatial relationship between 2 GIS shapes
-# Test the MBRContains
+--echo # Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -873,7 +874,7 @@ WHERE MBRContains(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRContains(tab.c4, @g1);
-# Test the MBRWithin
+--echo # Test the MBRWithin
SET @g1 = ST_GeomFromText( 'POLYGON((30 30,40 40,50 50,30 50,30 40,30 30)) ');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRWithin(tab.c4, @g1) ORDER BY c1;
@@ -885,7 +886,7 @@ WHERE MBRWithin(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
-# Test the ST_Crosses
+--echo # Test the ST_Crosses
SET @g1 = ST_GeomFromText('POLYGON((100 200,200 300,400 500,500 300,300 200,100 300,100 200))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Crosses(tab.c4, @g1) ORDER BY c1;
@@ -908,7 +909,7 @@ WHERE ST_Crosses(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE ST_Crosses(tab.c4, @g1);
-# Test the MBRDisjoint
+--echo # Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
@@ -920,7 +921,7 @@ WHERE MBRDisjoint(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
-# Test the MBREquals
+--echo # Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -932,7 +933,7 @@ WHERE MBREquals(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
-# Test the MBRintersects
+--echo # Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
@@ -955,7 +956,7 @@ WHERE MBRintersects(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
-# Test the Overelaps
+--echo # Test the Overelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
@@ -978,7 +979,7 @@ WHERE MBROverlaps(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
-# Test the ST_Touches
+--echo # Test the ST_Touches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE ST_Touches(tab.c4, @g1) ORDER BY c1;
@@ -1001,7 +1002,7 @@ WHERE ST_Touches(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE ST_Touches(tab.c4, @g1);
-# Test the MBRContains
+--echo # Test the MBRContains
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -1024,7 +1025,7 @@ WHERE MBRWithin(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRWithin(tab.c4, @g1);
-# Test the MBRDisjoint
+--echo # Test the MBRDisjoint
SET @g1 = ST_GeomFromText('POLYGON((4 -2,5 -4,6 -5,7 -4,7 2,4 -2))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRDisjoint(tab.c4, @g1) ORDER BY c1;
@@ -1036,7 +1037,7 @@ WHERE MBRDisjoint(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRDisjoint(tab.c4, @g1);
-# Test the MBREquals
+--echo # Test the MBREquals
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -1048,7 +1049,7 @@ WHERE MBREquals(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBREquals(tab.c4, @g1);
-# Test the MBRintersects
+--echo # Test the MBRintersects
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRIntersects(tab.c4, @g1) ORDER BY c1;
@@ -1071,7 +1072,7 @@ WHERE MBRintersects(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRintersects(tab.c4, @g1);
-# Test the MBROverelaps
+--echo # Test the MBROverelaps
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 2,4 5,5 5,7 1,0 0 ))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBROverlaps(tab.c4, @g1) ORDER BY c1;
@@ -1083,7 +1084,7 @@ WHERE MBROverlaps(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBROverlaps(tab.c4, @g1);
-# Test the MBRTouches
+--echo # Test the MBRTouches
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRTouches(tab.c4, @g1) ORDER BY c1;
@@ -1095,7 +1096,7 @@ WHERE MBRTouches(tab.c4, @g1);
EXPLAIN DELETE FROM tab WHERE MBRTouches(tab.c4, @g1);
-# Test the Delete & Update
+--echo # Test the Delete & Update
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
SELECT c1,ST_Astext(c4) FROM tab WHERE MBREquals(tab.c4, @g1) ORDER BY c1;
@@ -1124,7 +1125,7 @@ DROP TABLE tab;
# End of Testcase compress table with Auto_increment
-# Test check constraint on spatial column
+--echo # Test check constraint on spatial column
--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
CREATE TABLE tab(c1 POINT NOT NULL,CONSTRAINT tab_const check(c1 > 0) ) ENGINE=InnoDB;
CREATE TABLE tab(c1 POINT NOT NULL,CONSTRAINT tab_const check(CAST(c1 AS BINARY) > 0) ) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb_gis/t/rtree_multi_pk.test b/mysql-test/suite/innodb_gis/t/rtree_multi_pk.test
index f606e569376..567e16947b7 100644
--- a/mysql-test/suite/innodb_gis/t/rtree_multi_pk.test
+++ b/mysql-test/suite/innodb_gis/t/rtree_multi_pk.test
@@ -66,6 +66,7 @@ INSERT INTO t1 VALUES("left3", ST_GeomFromText('POLYGON (( -3 0, -3 2, -1 2, -1
SET @p = ST_GeomFromText('POLYGON (( 0 0, 0 2, 2 2, 2 0, 0 0))');
SELECT name, ST_AsText(square) from t1 where MBRContains(@p, square);
+--sorted_result
SELECT name, ST_AsText(square) from t1 where MBRDisjoint(@p, square);
SELECT name, ST_AsText(square) from t1 where MBREquals(@p, square);
SELECT name, ST_AsText(square) from t1 where MBRIntersects(@p, square);
diff --git a/mysql-test/suite/innodb_zip/r/bug36169.result b/mysql-test/suite/innodb_zip/r/bug36169.result
index 7e165e0f7d4..c7e234b5ba1 100644
--- a/mysql-test/suite/innodb_zip/r/bug36169.result
+++ b/mysql-test/suite/innodb_zip/r/bug36169.result
@@ -1 +1 @@
-SET GLOBAL innodb_file_per_table=ON;
+DROP TABLE IF EXISTS table6;
diff --git a/mysql-test/suite/innodb_zip/r/bug53591.result b/mysql-test/suite/innodb_zip/r/bug53591.result
index 779064d054f..0c6386df022 100644
--- a/mysql-test/suite/innodb_zip/r/bug53591.result
+++ b/mysql-test/suite/innodb_zip/r/bug53591.result
@@ -1,9 +1,4 @@
call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
-SET GLOBAL innodb_file_per_table=on;
-SET GLOBAL innodb_strict_mode=on;
-set old_alter_table=0;
-Warnings:
-Warning 1287 '@@old_alter_table' is deprecated and will be removed in a future release. Please use '@@alter_algorithm' instead
CREATE TABLE bug53591(a text charset utf8 not null)
ENGINE=InnoDB KEY_BLOCK_SIZE=1;
ALTER TABLE bug53591 ADD PRIMARY KEY(a(220));
@@ -12,5 +7,3 @@ SHOW WARNINGS;
Level Code Message
Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is {checked_valid}. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
DROP TABLE bug53591;
-SET GLOBAL innodb_file_per_table=1;
-SET GLOBAL innodb_strict_mode=DEFAULT;
diff --git a/mysql-test/suite/innodb_zip/r/bug56680.result b/mysql-test/suite/innodb_zip/r/bug56680.result
index 1a1a5b40707..3c325351c3a 100644
--- a/mysql-test/suite/innodb_zip/r/bug56680.result
+++ b/mysql-test/suite/innodb_zip/r/bug56680.result
@@ -1,5 +1,4 @@
SET GLOBAL tx_isolation='REPEATABLE-READ';
-SET GLOBAL innodb_file_per_table=on;
CREATE TABLE bug56680(
a INT AUTO_INCREMENT PRIMARY KEY,
b CHAR(1),
diff --git a/mysql-test/suite/innodb_zip/r/cmp_drop_table.result b/mysql-test/suite/innodb_zip/r/cmp_drop_table.result
index e1f63268d0b..530932e99fb 100644
--- a/mysql-test/suite/innodb_zip/r/cmp_drop_table.result
+++ b/mysql-test/suite/innodb_zip/r/cmp_drop_table.result
@@ -1,4 +1,3 @@
-set global innodb_file_per_table=on;
create table t1(a text) engine=innodb key_block_size=4;
SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0;
page_size
diff --git a/mysql-test/suite/innodb_zip/r/create_options.result b/mysql-test/suite/innodb_zip/r/create_options.result
index a678d7a85f3..0f4c1239d73 100644
--- a/mysql-test/suite/innodb_zip/r/create_options.result
+++ b/mysql-test/suite/innodb_zip/r/create_options.result
@@ -1,5 +1,4 @@
SET default_storage_engine=InnoDB;
-SET GLOBAL innodb_file_per_table=ON;
SET SESSION innodb_strict_mode = ON;
# Test 1) StrictMode=ON, CREATE and ALTER with each ROW_FORMAT & KEY_BLOCK_SIZE=0
# KEY_BLOCK_SIZE=0 means 'no KEY_BLOCK_SIZE is specified'
@@ -280,6 +279,8 @@ Level Code Message
# innodb_file_per_table=OFF and that they can be set to default
# values during strict mode.
SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
DROP TABLE t1;
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1;
Got one of the listed errors
@@ -354,9 +355,13 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic
SET GLOBAL innodb_file_per_table=ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
DROP TABLE t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
@@ -367,6 +372,8 @@ ALTER TABLE t1 ADD COLUMN f2 INT;
SHOW WARNINGS;
Level Code Message
SET GLOBAL innodb_file_per_table=ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
##################################################
SET SESSION innodb_strict_mode = OFF;
# Test 9) StrictMode=OFF, CREATE and ALTER with each ROW_FORMAT & KEY_BLOCK_SIZE=0
@@ -722,6 +729,8 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compressed row_format=COMPRESSED key_block_size=2
SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
@@ -729,6 +738,8 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compressed row_format=COMPRESSED key_block_size=2
SET GLOBAL innodb_file_per_table=ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t1 ADD COLUMN f2 INT;
SHOW WARNINGS;
Level Code Message
@@ -743,6 +754,8 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_per_table=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
@@ -750,6 +763,8 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_per_table=ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t1 ADD COLUMN f2 INT;
SHOW WARNINGS;
Level Code Message
diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix.result b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
index ce302327713..81bdcdeb740 100644
--- a/mysql-test/suite/innodb_zip/r/index_large_prefix.result
+++ b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
@@ -1,5 +1,4 @@
SET default_storage_engine=InnoDB;
-set global innodb_file_per_table=1;
### Test 1 ###
create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC;
show warnings;
@@ -449,4 +448,3 @@ create index idx on worklog5743(a(768));
ERROR HY000: Index column size too large. The maximum column size is 767 bytes
create index idx2 on worklog5743(a(767));
drop table worklog5743;
-SET GLOBAL innodb_file_per_table=1;
diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_2.result b/mysql-test/suite/innodb_zip/r/innochecksum_2.result
index 681d8e1f4c7..33d80b02ca8 100644
--- a/mysql-test/suite/innodb_zip/r/innochecksum_2.result
+++ b/mysql-test/suite/innodb_zip/r/innochecksum_2.result
@@ -88,5 +88,6 @@ merge 0
Number of pages:#
Number of pages:#
[4]:# Print the version of innochecksum and exit
-innochecksum Ver #.#.## Restart the DB server
+innochecksum from #.#.#-MariaDB, client #.# for OS (ARCH)
+# Restart the DB server
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb_zip/r/innodb-zip.result b/mysql-test/suite/innodb_zip/r/innodb-zip.result
index 0a3119f48a4..c44eefcbbb0 100644
--- a/mysql-test/suite/innodb_zip/r/innodb-zip.result
+++ b/mysql-test/suite/innodb_zip/r/innodb-zip.result
@@ -6,7 +6,10 @@ WHERE table_schema='mysqltest_innodb_zip';
table_name row_format data_length index_length
SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata;
set session innodb_strict_mode=0;
+SET @save_fpt=@@GLOBAL.innodb_file_per_table;
set global innodb_file_per_table=off;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SET @@global.innodb_stats_on_metadata=ON;
create table t0(a int primary key) engine=innodb row_format=compressed;
Warnings:
@@ -32,6 +35,8 @@ Warnings:
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
set global innodb_file_per_table=on;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
create table t6(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
@@ -153,7 +158,6 @@ count(*)
1
update t1 set c3 = repeat('E', 20000) where c1 = 1;
drop table t1;
-set global innodb_file_per_table = on;
set innodb_strict_mode = off;
create table t1 (id int primary key) engine = innodb key_block_size = 0;
drop table t1;
@@ -247,6 +251,8 @@ Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql' and table_schema != 'sys' order by table_name;
table_schema table_name row_format data_length index_length
set global innodb_file_per_table = off;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
create table t1 (id int primary key) engine = innodb key_block_size = 1;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
show warnings;
@@ -286,4 +292,8 @@ mysqltest_innodb_zip t7 Dynamic {valid} 0
mysqltest_innodb_zip t8 Compact {valid} 0
mysqltest_innodb_zip t9 Redundant {valid} 0
drop table t7, t8, t9;
+SET GLOBAL innodb_file_per_table=@save_fpt;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
+SET @@global.innodb_stats_on_metadata=@save_innodb_stats_on_metadata;
DROP DATABASE mysqltest_innodb_zip;
diff --git a/mysql-test/suite/innodb_zip/r/large_blob.result b/mysql-test/suite/innodb_zip/r/large_blob.result
index 7070d610f58..55bb90aab5c 100644
--- a/mysql-test/suite/innodb_zip/r/large_blob.result
+++ b/mysql-test/suite/innodb_zip/r/large_blob.result
@@ -1,8 +1,9 @@
#
# This tests the use of large blobs in InnoDB.
#
-call mtr.add_suppression("InnoDB: Warning: a long semaphore wait");
SET GLOBAL innodb_file_per_table = OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
#
# System tablespace, Row Format = Redundant
#
@@ -26,6 +27,8 @@ INSERT INTO t1 VALUES (1, '');
UPDATE t1 SET c2=@longblob;
DROP TABLE t1;
SET GLOBAL innodb_file_per_table = ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
#
# Separate tablespace, Row Format = Redundant
#
diff --git a/mysql-test/suite/innodb_zip/r/page_size,4k.rdiff b/mysql-test/suite/innodb_zip/r/page_size,4k.rdiff
index 494da214f2e..c901bf6dbe0 100644
--- a/mysql-test/suite/innodb_zip/r/page_size,4k.rdiff
+++ b/mysql-test/suite/innodb_zip/r/page_size,4k.rdiff
@@ -7,7 +7,7 @@
# Test 3) Query some information_shema tables that are dependent upon
# the page size.
SELECT t.name table_name, t.n_cols, t.flag table_flags,
-@@ -36,13 +36,13 @@
+@@ -36,7 +36,7 @@
table_name n_cols table_flags index_name root_page type n_fields merge_threshold
test/t1 5 0 PRIMARY 3 3 1 50
test/t2 5 1 PRIMARY 3 3 1 50
@@ -16,14 +16,16 @@
test/t4 5 33 PRIMARY 3 3 1 50
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+@@ -45,7 +45,7 @@
+ innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t1 DEFAULT DEFAULT MYSQLD_DATADIR/test/t1.ibd
test/t2 DEFAULT DEFAULT MYSQLD_DATADIR/test/t2.ibd
-test/t3 DEFAULT 8192 MYSQLD_DATADIR/test/t3.ibd
+test/t3 DEFAULT 2048 MYSQLD_DATADIR/test/t3.ibd
test/t4 DEFAULT DEFAULT MYSQLD_DATADIR/test/t4.ibd
+ innodb_temporary DEFAULT DEFAULT MYSQLD_DATADIR/ibtmp1
DROP TABLE t1, t2, t3, t4;
- # Test 4) The maximum row size is dependent upon the page size.
-@@ -51,141 +51,90 @@
+@@ -55,141 +55,90 @@
SET SESSION innodb_strict_mode = ON;
CREATE TABLE t1 (
c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200),
@@ -211,7 +213,7 @@
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
SHOW WARNINGS;
Level Code Message
-@@ -217,15 +166,21 @@
+@@ -221,15 +170,21 @@
DROP TABLE t1;
SET SESSION innodb_strict_mode = OFF;
CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16;
@@ -233,7 +235,7 @@
SELECT table_name, row_format, create_options
FROM information_schema.tables WHERE table_name = 't1';
table_name row_format create_options
-@@ -269,6 +224,7 @@
+@@ -275,6 +230,7 @@
ERROR HY000: Can't create table `test`.`t4` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
@@ -241,7 +243,7 @@
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Error 1005 Can't create table `test`.`t4` (errno: 140 "Wrong create options")
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
-@@ -276,105 +232,11 @@
+@@ -282,107 +238,13 @@
ERROR HY000: Can't create table `test`.`t5` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
@@ -250,6 +252,8 @@
Error 1005 Can't create table `test`.`t5` (errno: 140 "Wrong create options")
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
SET GLOBAL innodb_file_per_table = ON;
+ Warnings:
+ Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
-# Test 7) This series of tests were moved from innodb-index to here
-# because the second alter table t1 assumes a 16k page size.
-# Moving the test allows the rest of innodb-index to be run on all
@@ -348,7 +352,7 @@
# Test 8) Test creating a table that could lead to undo log overflow.
CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,
h blob,i blob,j blob,k blob,l blob,m blob,n blob,
-@@ -389,10 +251,6 @@
+@@ -397,10 +259,6 @@
UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b,
k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b;
CREATE INDEX t1a ON t1 (a(767));
@@ -359,7 +363,7 @@
UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c,
k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c;
CREATE INDEX t1f ON t1 (f(767));
-@@ -407,37 +265,15 @@
+@@ -415,37 +273,15 @@
COMMIT;
CREATE INDEX t1g ON t1 (g(767));
UPDATE t1 SET g=@e;
@@ -401,7 +405,7 @@
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
-@@ -463,28 +299,12 @@
+@@ -471,28 +307,12 @@
`t` blob DEFAULT NULL,
`u` blob DEFAULT NULL,
KEY `t1a` (`a`(767)),
@@ -432,7 +436,7 @@
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci ROW_FORMAT=DYNAMIC
DROP TABLE t1;
# Bug#12547647 UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
-@@ -569,27 +389,25 @@
+@@ -577,27 +397,25 @@
DROP TABLE t1;
CREATE TABLE t1(
c text NOT NULL, d text NOT NULL,
diff --git a/mysql-test/suite/innodb_zip/r/page_size,8k.rdiff b/mysql-test/suite/innodb_zip/r/page_size,8k.rdiff
index a22fa9e9b95..90b2a1f7c48 100644
--- a/mysql-test/suite/innodb_zip/r/page_size,8k.rdiff
+++ b/mysql-test/suite/innodb_zip/r/page_size,8k.rdiff
@@ -7,7 +7,7 @@
# Test 3) Query some information_shema tables that are dependent upon
# the page size.
SELECT t.name table_name, t.n_cols, t.flag table_flags,
-@@ -36,13 +36,13 @@
+@@ -36,7 +36,7 @@
table_name n_cols table_flags index_name root_page type n_fields merge_threshold
test/t1 5 0 PRIMARY 3 3 1 50
test/t2 5 1 PRIMARY 3 3 1 50
@@ -16,14 +16,16 @@
test/t4 5 33 PRIMARY 3 3 1 50
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+@@ -45,7 +45,7 @@
+ innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t1 DEFAULT DEFAULT MYSQLD_DATADIR/test/t1.ibd
test/t2 DEFAULT DEFAULT MYSQLD_DATADIR/test/t2.ibd
-test/t3 DEFAULT 8192 MYSQLD_DATADIR/test/t3.ibd
+test/t3 DEFAULT 4096 MYSQLD_DATADIR/test/t3.ibd
test/t4 DEFAULT DEFAULT MYSQLD_DATADIR/test/t4.ibd
+ innodb_temporary DEFAULT DEFAULT MYSQLD_DATADIR/ibtmp1
DROP TABLE t1, t2, t3, t4;
- # Test 4) The maximum row size is dependent upon the page size.
-@@ -53,133 +53,97 @@
+@@ -57,133 +57,97 @@
c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200),
c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200),
c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200),
@@ -199,7 +201,7 @@
SHOW WARNINGS;
Level Code Message
SELECT table_name, row_format, create_options
-@@ -217,8 +181,11 @@
+@@ -221,8 +185,11 @@
DROP TABLE t1;
SET SESSION innodb_strict_mode = OFF;
CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16;
@@ -211,7 +213,7 @@
SELECT table_name, row_format, create_options
FROM information_schema.tables WHERE table_name = 't1';
table_name row_format create_options
-@@ -276,105 +243,11 @@
+@@ -282,107 +249,13 @@
ERROR HY000: Can't create table `test`.`t5` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
@@ -220,6 +222,8 @@
Error 1005 Can't create table `test`.`t5` (errno: 140 "Wrong create options")
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
SET GLOBAL innodb_file_per_table = ON;
+ Warnings:
+ Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
-# Test 7) This series of tests were moved from innodb-index to here
-# because the second alter table t1 assumes a 16k page size.
-# Moving the test allows the rest of innodb-index to be run on all
@@ -318,7 +322,7 @@
# Test 8) Test creating a table that could lead to undo log overflow.
CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,
h blob,i blob,j blob,k blob,l blob,m blob,n blob,
-@@ -389,10 +262,6 @@
+@@ -397,10 +270,6 @@
UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b,
k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b;
CREATE INDEX t1a ON t1 (a(767));
@@ -329,7 +333,7 @@
UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c,
k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c;
CREATE INDEX t1f ON t1 (f(767));
-@@ -407,30 +276,6 @@
+@@ -415,30 +284,6 @@
COMMIT;
CREATE INDEX t1g ON t1 (g(767));
UPDATE t1 SET g=@e;
@@ -360,7 +364,7 @@
CREATE INDEX t1t ON t1 (t(767));
BEGIN;
UPDATE t1 SET t=@e;
-@@ -463,24 +308,8 @@
+@@ -471,24 +316,8 @@
`t` blob DEFAULT NULL,
`u` blob DEFAULT NULL,
KEY `t1a` (`a`(767)),
@@ -385,7 +389,7 @@
KEY `t1t` (`t`(767)),
KEY `t1u` (`u`(767)),
KEY `t1ut` (`u`(767),`t`(767)),
-@@ -572,14 +401,14 @@
+@@ -580,14 +409,14 @@
PRIMARY KEY (c(767),d(767)))
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
Warnings:
@@ -402,7 +406,7 @@
DROP TABLE t1;
CREATE TABLE t1(
c text NOT NULL, d text NOT NULL,
-@@ -589,7 +418,7 @@
+@@ -597,7 +426,7 @@
CREATE TABLE t1(c text, PRIMARY KEY (c(440)))
ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
Warnings:
diff --git a/mysql-test/suite/innodb_zip/r/page_size.result b/mysql-test/suite/innodb_zip/r/page_size.result
index 47effe06884..c8d405e912a 100644
--- a/mysql-test/suite/innodb_zip/r/page_size.result
+++ b/mysql-test/suite/innodb_zip/r/page_size.result
@@ -40,6 +40,9 @@ test/t3 5 41 PRIMARY 3 3 1 50
test/t4 5 33 PRIMARY 3 3 1 50
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t1 DEFAULT DEFAULT MYSQLD_DATADIR/test/t1.ibd
test/t2 DEFAULT DEFAULT MYSQLD_DATADIR/test/t2.ibd
test/t3 DEFAULT 8192 MYSQLD_DATADIR/test/t3.ibd
@@ -263,6 +266,8 @@ DROP TABLE t1;
# Test 6) KEY_BLOCK_SIZE with innodb_file_per_table=OFF
SET SESSION innodb_strict_mode = ON;
SET GLOBAL innodb_file_per_table = OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SHOW VARIABLES LIKE 'innodb_file_per_table';
Variable_name Value
innodb_file_per_table OFF
@@ -281,6 +286,8 @@ Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Error 1005 Can't create table `test`.`t5` (errno: 140 "Wrong create options")
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
SET GLOBAL innodb_file_per_table = ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
# Test 7) This series of tests were moved from innodb-index to here
# because the second alter table t1 assumes a 16k page size.
# Moving the test allows the rest of innodb-index to be run on all
diff --git a/mysql-test/suite/innodb_zip/r/restart.result b/mysql-test/suite/innodb_zip/r/restart.result
index eb1bfe67c5d..133cf020d55 100644
--- a/mysql-test/suite/innodb_zip/r/restart.result
+++ b/mysql-test/suite/innodb_zip/r/restart.result
@@ -3,7 +3,6 @@ SET default_storage_engine=InnoDB;
# A series of tests to make sure tables are opened after restart.
# Bug#13357607 Compressed file-per-table tablespaces fail to open
#
-set global innodb_file_per_table=on;
#
# Create and insert records into a REDUNDANT row formatted table.
#
@@ -211,6 +210,9 @@ test/t7_restart#p#p1#sp#s2 test/t7_restart#p#p1#sp#s2 97 8 Dynamic 0
test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t1_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t1_restart.ibd
test/t2_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t2_restart.ibd
test/t3_restart DEFAULT 2048 MYSQLD_DATADIR/test/t3_restart.ibd
@@ -265,9 +267,6 @@ t7_restart#p#p1#sp#s3.ibd
# Start the server and show that tables are still visible and accessible.
#
# restart
-SHOW VARIABLES LIKE 'innodb_file_per_table';
-Variable_name Value
-innodb_file_per_table ON
SHOW CREATE TABLE t1_restart;
Table Create Table
t1_restart CREATE TABLE `t1_restart` (
@@ -397,6 +396,9 @@ test/t7_restart#p#p1#sp#s2 test/t7_restart#p#p1#sp#s2 97 8 Dynamic 0
test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t1_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t1_restart.ibd
test/t2_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t2_restart.ibd
test/t3_restart DEFAULT 2048 MYSQLD_DATADIR/test/t3_restart.ibd
@@ -421,6 +423,9 @@ ALTER TABLE t6_restart TRUNCATE PARTITION p2;
ALTER TABLE t7_restart TRUNCATE PARTITION p1;
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t4_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t4_restart.ibd
test/t6_restart#p#p0 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
test/t6_restart#p#p1 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
@@ -522,11 +527,11 @@ t7_restart#p#p1#sp#s3.ibd
# Start the server and show the tablespaces.
#
# restart
-SHOW VARIABLES LIKE 'innodb_file_per_table';
-Variable_name Value
-innodb_file_per_table ON
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t4_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t4_restart.ibd
test/t6_restart#p#p0 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
test/t6_restart#p#p1 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
@@ -629,6 +634,9 @@ RENAME TABLE t6_restart TO t66_restart;
RENAME TABLE t7_restart TO t77_restart;
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t4_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t4_restart.ibd
test/t66_restart#p#p0 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd
test/t66_restart#p#p1 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd
@@ -723,11 +731,11 @@ t77_restart#p#p1#sp#s3.ibd
# Restart the server
#
# restart
-SHOW VARIABLES LIKE 'innodb_file_per_table';
-Variable_name Value
-innodb_file_per_table ON
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t4_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t4_restart.ibd
test/t66_restart#p#p0 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd
test/t66_restart#p#p1 DEFAULT 2048 MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd
@@ -863,6 +871,9 @@ t77_restart#p#p1#sp#s3.ibd
# restart
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t4_restart DEFAULT DEFAULT MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd
test/t66_restart#p#p0 DEFAULT 2048 MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p0.ibd
test/t66_restart#p#p1 DEFAULT 2048 MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p1.ibd
@@ -1002,6 +1013,9 @@ t77_restart.par
# restart
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Page_Size Zip_Size Path
+innodb_undo001 DEFAULT DEFAULT MYSQLD_DATADIR//undo001
+innodb_undo002 DEFAULT DEFAULT MYSQLD_DATADIR//undo002
+innodb_undo003 DEFAULT DEFAULT MYSQLD_DATADIR//undo003
test/t4_restart DEFAULT DEFAULT MYSQLD_DATADIR/test/t4_restart.ibd
test/t66_restart#p#p0 DEFAULT 2048 MYSQLD_DATADIR/test/t66_restart#p#p0.ibd
test/t66_restart#p#p1 DEFAULT 2048 MYSQLD_DATADIR/test/t66_restart#p#p1.ibd
diff --git a/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result b/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result
index 62a28a981cd..f44190680c0 100644
--- a/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result
+++ b/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result
@@ -94,11 +94,6 @@ ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Got error 42 'Tablespace not found' from ./test/t1.ibd
SET SESSION debug_dbug=@saved_debug_dbug;
restore: t1 .ibd and .cfg files
-SET SESSION debug_dbug="+d,ib_import_check_bitmap_failure";
-ALTER TABLE t1 IMPORT TABLESPACE;
-ERROR HY000: Index for table 't1' is corrupt; try to repair it
-SET SESSION debug_dbug=@saved_debug_dbug;
-restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_cluster_root_adjust_failure";
ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Index for table 't1' is corrupt; try to repair it
diff --git a/mysql-test/suite/innodb_zip/r/wl5522_zip.result b/mysql-test/suite/innodb_zip/r/wl5522_zip.result
index 1b044d68680..a7cc00cb1fd 100644
--- a/mysql-test/suite/innodb_zip/r/wl5522_zip.result
+++ b/mysql-test/suite/innodb_zip/r/wl5522_zip.result
@@ -263,6 +263,8 @@ c1 c2
unlink: t1.cfg
DROP TABLE t1;
SET GLOBAL innodb_file_per_table = 0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1(
c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
c2 INT) ENGINE=InnoDB;
@@ -280,6 +282,8 @@ Warning 1809 Table `test`.`t1` in system tablespace
UNLOCK TABLES;
DROP TABLE t1;
SET GLOBAL innodb_file_per_table = 1;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1(
c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
c2 INT, INDEX idx(c2)) ENGINE=InnoDB
diff --git a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
index 05b4793eb4d..f46678340e2 100644
--- a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
+++ b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
@@ -8,7 +8,6 @@
# check the size and compression stats of the table tab5
#******************************************************************
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
SET GLOBAL innodb_compression_level=0;
#******************************************************************
@@ -318,7 +317,6 @@ The size of the tab5.ibd file: 5242880
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=0;
# fetch the compressed page and check the stats
# The stats figure may be different/same for each restart.
@@ -655,7 +653,6 @@ The size of the tab5.ibd file: 2097152
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=0;
# fetch the compressed page and check the stats
# The stats figure may be different/same for each restart.
@@ -1615,7 +1612,6 @@ DROP TABLE tab5;
SET GLOBAL innodb_cmp_per_index_enabled=0;
SET GLOBAL innodb_cmp_per_index_enabled=1;
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=9;
#******************************************************************
# Test 2-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 1K
@@ -1926,7 +1922,6 @@ The size of the tab5.ibd file: 65536
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=9;
# fetch the compressed page and check the stats
# The stats figure may be different/same for each restart.
@@ -2265,7 +2260,6 @@ The size of the tab5.ibd file: 65536
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=9;
# fetch the compressed page and check the stats
# The stats figure may be different/same for each restart.
@@ -4710,8 +4704,6 @@ SELECT @@innodb_cmp_per_index_enabled;
@@innodb_cmp_per_index_enabled 1
SELECT @@innodb_compression_failure_threshold_pct;
@@innodb_compression_failure_threshold_pct 0
-SELECT @@innodb_file_per_table;
-@@innodb_file_per_table 1
SELECT @@innodb_compression_level;
@@innodb_compression_level 6
#******************************************************************
@@ -5022,7 +5014,6 @@ The size of the tab5.ibd file: 65536
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
SET GLOBAL innodb_compression_failure_threshold_pct=0;
-SET GLOBAL innodb_file_per_table=on;
# fetch the compressed page and check the stats
# The stats figure may be different/same for each restart.
===============
@@ -6289,7 +6280,6 @@ DROP TABLE tab5;
SET GLOBAL innodb_cmp_per_index_enabled=OFF;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
SET GLOBAL innodb_compression_failure_threshold_pct=10;
SET GLOBAL innodb_compression_level=Default;
@@ -6298,8 +6288,6 @@ SELECT @@innodb_cmp_per_index_enabled;
@@innodb_cmp_per_index_enabled 1
SELECT @@innodb_compression_failure_threshold_pct;
@@innodb_compression_failure_threshold_pct 10
-SELECT @@innodb_file_per_table;
-@@innodb_file_per_table 1
SELECT @@innodb_compression_level;
@@innodb_compression_level 6
#******************************************************************
@@ -6610,8 +6598,6 @@ The size of the tab5.ibd file: 65536
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
SET GLOBAL innodb_compression_failure_threshold_pct=10;
-SET GLOBAL innodb_file_per_table=on;
-SET GLOBAL innodb_compression_failure_threshold_pct=10;
# fetch the compressed page and check the stats
# The stats figure may be different/same for each restart.
===============
@@ -7874,7 +7860,6 @@ DROP TABLE tab5;
# reset the stat table before starting next testcase
SET GLOBAL innodb_cmp_per_index_enabled=0;
SET GLOBAL innodb_cmp_per_index_enabled=1;
-SET GLOBAL innodb_file_per_table=ON;
SET GLOBAL innodb_compression_level=default;
SET GLOBAL innodb_compression_failure_threshold_pct=default;
# create a table page size=1K
@@ -7923,6 +7908,5 @@ compress_ops_ok 1
DROP TABLE tab5, test.tab5;
DROP DATABASE sb;
# reset the flags
-SET GLOBAL innodb_file_per_table=default;
SET GLOBAL innodb_cmp_per_index_enabled=default;
SET GLOBAL innodb_compression_failure_threshold_pct=default;
diff --git a/mysql-test/suite/innodb_zip/t/bug36169.test b/mysql-test/suite/innodb_zip/t/bug36169.test
index 07566b204bd..8ab970f67b2 100644
--- a/mysql-test/suite/innodb_zip/t/bug36169.test
+++ b/mysql-test/suite/innodb_zip/t/bug36169.test
@@ -5,9 +5,6 @@
-- source include/innodb_page_size_small.inc
-let $file_per_table=`select @@innodb_file_per_table`;
-SET GLOBAL innodb_file_per_table=ON;
-
#
# The following is copied from http://bugs.mysql.com/36169
# (http://bugs.mysql.com/file.php?id=9121)
@@ -1155,7 +1152,5 @@ DROP TABLE IF EXISTS table2;
DROP TABLE IF EXISTS table3;
DROP TABLE IF EXISTS table4;
DROP TABLE IF EXISTS table5;
+-- enable_query_log
DROP TABLE IF EXISTS table6;
-
-EVAL SET GLOBAL innodb_file_per_table=$file_per_table;
-SET sql_mode = default;
diff --git a/mysql-test/suite/innodb_zip/t/bug53591.test b/mysql-test/suite/innodb_zip/t/bug53591.test
index 17c79e0f6f8..ef4d12ad078 100644
--- a/mysql-test/suite/innodb_zip/t/bug53591.test
+++ b/mysql-test/suite/innodb_zip/t/bug53591.test
@@ -2,13 +2,6 @@
call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
-let $file_per_table=`select @@innodb_file_per_table`;
-
-SET GLOBAL innodb_file_per_table=on;
-SET GLOBAL innodb_strict_mode=on;
-
-set old_alter_table=0;
-
CREATE TABLE bug53591(a text charset utf8 not null)
ENGINE=InnoDB KEY_BLOCK_SIZE=1;
-- replace_result 8126 {checked_valid} 4030 {checked_valid} 1982 {checked_valid}
@@ -18,6 +11,3 @@ ALTER TABLE bug53591 ADD PRIMARY KEY(a(220));
SHOW WARNINGS;
DROP TABLE bug53591;
-
-EVAL SET GLOBAL innodb_file_per_table=$file_per_table;
-SET GLOBAL innodb_strict_mode=DEFAULT;
diff --git a/mysql-test/suite/innodb_zip/t/bug56680.test b/mysql-test/suite/innodb_zip/t/bug56680.test
index 54cbc7ca798..0d441dea0f5 100644
--- a/mysql-test/suite/innodb_zip/t/bug56680.test
+++ b/mysql-test/suite/innodb_zip/t/bug56680.test
@@ -16,7 +16,6 @@
SET GLOBAL innodb_change_buffering_debug = 1;
-- enable_query_log
SET GLOBAL tx_isolation='REPEATABLE-READ';
-SET GLOBAL innodb_file_per_table=on;
CREATE TABLE bug56680(
a INT AUTO_INCREMENT PRIMARY KEY,
diff --git a/mysql-test/suite/innodb_zip/t/cmp_drop_table.test b/mysql-test/suite/innodb_zip/t/cmp_drop_table.test
index ae6bfd9fb59..e64514fc9fb 100644
--- a/mysql-test/suite/innodb_zip/t/cmp_drop_table.test
+++ b/mysql-test/suite/innodb_zip/t/cmp_drop_table.test
@@ -3,12 +3,8 @@
# scans through pages
-- source include/not_encrypted.inc
-let $per_table=`select @@innodb_file_per_table`;
-
-- let $query_i_s = SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0
-set global innodb_file_per_table=on;
-
create table t1(a text) engine=innodb key_block_size=4;
-- disable_query_log
@@ -50,10 +46,3 @@ while ($i)
-- eval $query_i_s
drop table t2;
-
-#
-# restore environment to the state it was before this test execution
-#
-
--- disable_query_log
-eval set global innodb_file_per_table=$per_table;
diff --git a/mysql-test/suite/innodb_zip/t/create_options.test b/mysql-test/suite/innodb_zip/t/create_options.test
index fce64060df3..9840a28a2ce 100644
--- a/mysql-test/suite/innodb_zip/t/create_options.test
+++ b/mysql-test/suite/innodb_zip/t/create_options.test
@@ -59,11 +59,6 @@
--source include/innodb_page_size_small.inc
SET default_storage_engine=InnoDB;
-# These values can change during the test
-LET $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
-
-SET GLOBAL innodb_file_per_table=ON;
-
# The first half of these tests are with strict mode ON.
SET SESSION innodb_strict_mode = ON;
@@ -481,7 +476,3 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Cleanup
DROP TABLE t1;
-
---disable_query_log
-EVAL SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
---enable_query_log
diff --git a/mysql-test/suite/innodb_zip/t/index_large_prefix.test b/mysql-test/suite/innodb_zip/t/index_large_prefix.test
index a254c4e61ee..5c1e31a2db7 100644
--- a/mysql-test/suite/innodb_zip/t/index_large_prefix.test
+++ b/mysql-test/suite/innodb_zip/t/index_large_prefix.test
@@ -10,10 +10,6 @@ SET @save_innodb_read_only_compressed=@@GLOBAL.innodb_read_only_compressed;
SET GLOBAL innodb_read_only_compressed=OFF;
--enable_query_log
-let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
-
-set global innodb_file_per_table=1;
-
-- echo ### Test 1 ###
# Create a table of DYNAMIC format, with a primary index of 1000 bytes in
# size
@@ -408,5 +404,3 @@ create index idx2 on worklog5743(a(767));
SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
--enable_query_log
drop table worklog5743;
-
-eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
diff --git a/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test b/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test
index 04f7ac65edf..8ada226779a 100644
--- a/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test
+++ b/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test
@@ -10,10 +10,6 @@ SET @save_innodb_read_only_compressed=@@GLOBAL.innodb_read_only_compressed;
SET GLOBAL innodb_read_only_compressed=OFF;
--enable_query_log
-let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
-
-set global innodb_file_per_table=1;
-
-- echo ### Test 1 ###
# Create a table of DYNAMIC format, with a primary index of 768 bytes in
# size
@@ -380,5 +376,3 @@ create index idx2 on worklog5743(a(767));
SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
--enable_query_log
drop table worklog5743;
-
-eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
diff --git a/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test b/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test
index c9cd9574a95..e20de5e3e93 100644
--- a/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test
+++ b/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test
@@ -10,10 +10,6 @@ SET @save_innodb_read_only_compressed=@@GLOBAL.innodb_read_only_compressed;
SET GLOBAL innodb_read_only_compressed=OFF;
--enable_query_log
-let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
-
-set global innodb_file_per_table=1;
-
-- echo ### Test 1 ###
# Create a table of DYNAMIC format, with a primary index of 1000 bytes in
# size
@@ -402,5 +398,3 @@ create index idx2 on worklog5743(a(767));
SET GLOBAL innodb_read_only_compressed=@save_innodb_read_only_compressed;
--enable_query_log
drop table worklog5743;
-
-eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_2.test b/mysql-test/suite/innodb_zip/t/innochecksum_2.test
index 62e792c1ce4..1743bd4a1eb 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum_2.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum_2.test
@@ -62,7 +62,7 @@ open IN_FILE,"<", "$dir/tmp/$file" or die $!;
open OUT_FILE, ">", "$dir/tmp/tmpfile" or die $!;
while(<IN_FILE>) {
unless ($_=~ /^debug.*$/ || $_=~ /\-#, \-\-debug.*$/ || $_=~ /http:.*html/) {
- $_=~ s/^\S*innochecksum.+Ver.+[0-9]*\.[0-9]*\.[0-9]*.+$/innochecksum Ver #.#.#/g;
+ $_=~ s/^\S*innochecksum.* from .+$/innochecksum Ver #.#.#/g;
$_=~ s/(Copyright\s\(c\))\s([0-9]*),\s([0-9]*)(.*)/$1 YEAR, YEAR $4/g;
$_=~ s/Usage:.*\[-c/Usage: innochecksum [-c/g;
print OUT_FILE $_;
@@ -85,7 +85,7 @@ EOF
--exec $INNOCHECKSUM -c $MYSQLD_DATADIR/test/t1.ibd
--echo [4]:# Print the version of innochecksum and exit
---replace_regex /.*innochecksum.*Ver.*[0-9]*.[0-9]*.[0-9]*.*/innochecksum Ver #.#.#/
+--replace_regex /for \S+/for OS/ /\d+/#/ /#[-_A-Za-z0-9]*-MariaDB,/#-MariaDB,/ /\(.*\)/(ARCH)/ /^.*innochecksum(\.exe)?/innochecksum/
--exec $INNOCHECKSUM -V $MYSQLD_DATADIR/test/t1.ibd
--echo # Restart the DB server
diff --git a/mysql-test/suite/innodb_zip/t/innodb-zip.test b/mysql-test/suite/innodb_zip/t/innodb-zip.test
index 507900aa88c..47867f3fa3e 100644
--- a/mysql-test/suite/innodb_zip/t/innodb-zip.test
+++ b/mysql-test/suite/innodb_zip/t/innodb-zip.test
@@ -6,12 +6,12 @@ SELECT table_name, row_format, data_length, index_length
FROM information_schema.tables
WHERE table_schema='mysqltest_innodb_zip';
-let $per_table=`select @@innodb_file_per_table`;
SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata;
--let $query_i_s = SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql' and table_schema != 'sys' order by table_name
set session innodb_strict_mode=0;
+SET @save_fpt=@@GLOBAL.innodb_file_per_table;
set global innodb_file_per_table=off;
SET @@global.innodb_stats_on_metadata=ON;
@@ -123,8 +123,6 @@ select count(*) from t1 where c4 = repeat('C', 20000);
update t1 set c3 = repeat('E', 20000) where c1 = 1;
drop table t1;
-set global innodb_file_per_table = on;
-
set innodb_strict_mode = off;
create table t1 (id int primary key) engine = innodb key_block_size = 0;
drop table t1;
@@ -254,9 +252,7 @@ drop table t7, t8, t9;
# restore environment to the state it was before this test execution
#
--- disable_query_log
-eval set global innodb_file_per_table=$per_table;
+SET GLOBAL innodb_file_per_table=@save_fpt;
SET @@global.innodb_stats_on_metadata=@save_innodb_stats_on_metadata;
---enable_query_log
DROP DATABASE mysqltest_innodb_zip;
diff --git a/mysql-test/suite/innodb_zip/t/large_blob.test b/mysql-test/suite/innodb_zip/t/large_blob.test
index dd208129d08..d8163737a17 100644
--- a/mysql-test/suite/innodb_zip/t/large_blob.test
+++ b/mysql-test/suite/innodb_zip/t/large_blob.test
@@ -7,9 +7,6 @@
--source include/big_test.inc
--disable_query_log
-# These values can change during the test
-let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`;
-
# Create a 20MB blob that does not compress easily.
# 1000 Random characters is enough to keep compression low.
set @alphabet="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
@@ -27,8 +24,6 @@ while ($1 > 1)
set @longblob=repeat(@blob,200000);
--enable_query_log
-call mtr.add_suppression("InnoDB: Warning: a long semaphore wait");
-
SET GLOBAL innodb_file_per_table = OFF;
--echo #
@@ -123,7 +118,3 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES (1, '');
UPDATE t1 SET c2=@longblob;
DROP TABLE t1;
-
---disable_query_log
-EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig;
---enable_query_log
diff --git a/mysql-test/suite/innodb_zip/t/page_size.test b/mysql-test/suite/innodb_zip/t/page_size.test
index 16d65a139cf..91f9fc580b8 100644
--- a/mysql-test/suite/innodb_zip/t/page_size.test
+++ b/mysql-test/suite/innodb_zip/t/page_size.test
@@ -728,29 +728,6 @@ SHOW WARNINGS;
ROLLBACK;
DROP TABLE bug12547647;
-#
-# Bug #13336585 - INNODB: CHANGE BUFFERING WITH 4K PAGES CAN ASSERT
-# IF SECONDARY KEY IS NEAR MAX
-# If the secondary index tuple is close to half the page size,
-# ibuf_insert_low() could return DB_TOO_BIG_RECORD, which is not expected
-# in ibuf_insert(). In order to insure this does not happen, WL5756
-# imposes a maximum key length of 768 for 4k pages and 1536 for 8k pages.
-# The existing max key Size for 16k pages is 3072.
-#
-
-#-- disable_query_log
-# The flag innodb_change_buffering_debug is only available in debug builds.
-# It instructs InnoDB to try to evict pages from the buffer pool when
-# change buffering is possible, so that the change buffer will be used
-# whenever possible.
-# This flag is not used currently since it exposes valgrind error in ibuf
-# code with the following SQL
-#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
-#SET @innodb_change_buffering_debug_orig = @@innodb_change_buffering_debug;
-#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
-#SET GLOBAL innodb_change_buffering_debug = 1;
-#-- enable_query_log
-
# make sure the largest possible key entry can be added to the insert buffer.
# Make enough records so that the root page is not a leaf page.
SET SESSION innodb_strict_mode = OFF;
@@ -823,11 +800,6 @@ INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,
DELETE from t1;
DROP TABLE t1;
-#-- disable_query_log
-#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
-#SET GLOBAL innodb_change_buffering_debug = 0;
-#-- enable_query_log
-
# The following should fail in non-strict mode too.
# (The fix of Bug #50945 only affects REDUNDANT and COMPACT tables.)
if ($INNODB_PAGE_SIZE == 4096)
diff --git a/mysql-test/suite/innodb_zip/t/restart.test b/mysql-test/suite/innodb_zip/t/restart.test
index c442b919d71..644496d70d6 100644
--- a/mysql-test/suite/innodb_zip/t/restart.test
+++ b/mysql-test/suite/innodb_zip/t/restart.test
@@ -34,12 +34,8 @@ call mtr.add_suppression("\\[ERROR\\] InnoDB: The error means the system cannot
--disable_query_log
let $MYSQL_DATA_DIR= `select @@datadir`;
let $data_directory = DATA DIRECTORY='$MYSQL_TMP_DIR/alt_dir';
-
-let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
--enable_query_log
-set global innodb_file_per_table=on;
-
--echo #
--echo # Create and insert records into a REDUNDANT row formatted table.
--echo #
@@ -181,7 +177,6 @@ SELECT count(*) FROM t7_restart;
--echo #
--source include/start_mysqld.inc
-SHOW VARIABLES LIKE 'innodb_file_per_table';
SHOW CREATE TABLE t1_restart;
SHOW CREATE TABLE t2_restart;
SHOW CREATE TABLE t3_restart;
@@ -274,8 +269,6 @@ SHOW CREATE TABLE t7_restart;
--echo #
--source include/start_mysqld.inc
-SHOW VARIABLES LIKE 'innodb_file_per_table';
-
--source suite/innodb/include/show_i_s_tablespaces.inc
SELECT count(*) FROM t5_restart;
@@ -364,7 +357,6 @@ SHOW CREATE TABLE t77_restart;
--echo # Restart the server
--echo #
--source include/restart_mysqld.inc
-SHOW VARIABLES LIKE 'innodb_file_per_table';
--source suite/innodb/include/show_i_s_tablespaces.inc
@@ -599,8 +591,3 @@ DROP TABLE t77_restart;
--rmdir $MYSQL_TMP_DIR/alt_dir
--rmdir $MYSQL_TMP_DIR/new_dir/test
--rmdir $MYSQL_TMP_DIR/new_dir
-
--- disable_query_log
-eval set global innodb_file_per_table=$innodb_file_per_table_orig;
--- enable_query_log
-
diff --git a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
index 36dcd1e454b..b5bc2a94f01 100644
--- a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
+++ b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
@@ -226,21 +226,6 @@ do "$ENV{MTR_SUITE_DIR}/../innodb/include/innodb-util.pl";
ib_restore_tablespaces("test", "t1");
EOF
-# Test failure after ibuf check
-SET SESSION debug_dbug="+d,ib_import_check_bitmap_failure";
-
-# Need proper mapping of error codes :-(
---error ER_NOT_KEYFILE
-ALTER TABLE t1 IMPORT TABLESPACE;
-
-SET SESSION debug_dbug=@saved_debug_dbug;
-
-# Restore files
-perl;
-do "$ENV{MTR_SUITE_DIR}/../innodb/include/innodb-util.pl";
-ib_restore_tablespaces("test", "t1");
-EOF
-
# Test failure after adjusting the cluster index root page
SET SESSION debug_dbug="+d,ib_import_cluster_root_adjust_failure";
diff --git a/mysql-test/suite/innodb_zip/t/wl5522_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_zip.test
index dbb698cc545..f223d36a6de 100644
--- a/mysql-test/suite/innodb_zip/t/wl5522_zip.test
+++ b/mysql-test/suite/innodb_zip/t/wl5522_zip.test
@@ -83,7 +83,7 @@ ALTER TABLE t1 IMPORT TABLESPACE;
SELECT * FROM t1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
CREATE TABLE t1(
c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
c2 INT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
@@ -127,7 +127,7 @@ SELECT COUNT(*) FROM t1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
# Insert some more records to move the LSN forward and then drop the
# table and restore
CREATE TABLE t1(
@@ -174,7 +174,7 @@ SELECT COUNT(*) FROM t1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
# Insert some more records to move the LSN forward and then drop the
# table and restore, this time the table has a secondary index too.
CREATE TABLE t1(
@@ -217,7 +217,7 @@ SELECT COUNT(*) FROM t1 WHERE c2 = 1;
DROP TABLE t1;
-# Export/import on the same instance, with --innodb-file-per-table=1
+# Export/import on the same instance
# Insert some more records to move the LSN forward and then drop the
# table and restore, this time the table has a secondary index too.
# Rename the index on the create so that the IMPORT fails, drop index
diff --git a/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test b/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test
index be9a05b36ce..ae32b03a786 100644
--- a/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test
+++ b/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test
@@ -20,7 +20,6 @@ let MYSQLD_DATADIR=`SELECT @@datadir`;
let $innodb_compression_level = `SELECT @@global.innodb_compression_level`;
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
SET GLOBAL innodb_compression_level=0;
--disable_query_log
@@ -73,7 +72,6 @@ SET GLOBAL innodb_read_only_compressed=OFF;
--enable_query_log
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=0;
--echo # fetch the compressed page and check the stats
@@ -135,7 +133,6 @@ SET @inl_val=2;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=0;
--echo # fetch the compressed page and check the stats
@@ -296,7 +293,6 @@ SET GLOBAL innodb_cmp_per_index_enabled=0;
SET GLOBAL innodb_cmp_per_index_enabled=1;
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=9;
@@ -345,7 +341,6 @@ SET @inl_val=2;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=9;
@@ -413,7 +408,6 @@ SET @inl_val=2;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_compression_level=9;
@@ -801,7 +795,6 @@ SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # check the flags
SELECT @@innodb_cmp_per_index_enabled;
SELECT @@innodb_compression_failure_threshold_pct;
-SELECT @@innodb_file_per_table;
SELECT @@innodb_compression_level;
--echo #******************************************************************
@@ -848,8 +841,6 @@ SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # set the flags
SET GLOBAL innodb_compression_failure_threshold_pct=0;
-SET GLOBAL innodb_file_per_table=on;
-
--echo # fetch the compressed page and check the stats
--echo # The stats figure may be different/same for each restart.
@@ -1053,7 +1044,6 @@ SET GLOBAL innodb_cmp_per_index_enabled=OFF;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # set the flags
-SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_cmp_per_index_enabled=ON;
SET GLOBAL innodb_compression_failure_threshold_pct=10;
SET GLOBAL innodb_compression_level=Default;
@@ -1062,7 +1052,6 @@ SET GLOBAL innodb_compression_level=Default;
--echo # check the flags
SELECT @@innodb_cmp_per_index_enabled;
SELECT @@innodb_compression_failure_threshold_pct;
-SELECT @@innodb_file_per_table;
SELECT @@innodb_compression_level;
--echo #******************************************************************
@@ -1109,9 +1098,6 @@ SET GLOBAL innodb_cmp_per_index_enabled=ON;
--echo # set the flags
SET GLOBAL innodb_compression_failure_threshold_pct=10;
-SET GLOBAL innodb_file_per_table=on;
-SET GLOBAL innodb_compression_failure_threshold_pct=10;
-
--echo # fetch the compressed page and check the stats
--echo # The stats figure may be different/same for each restart.
@@ -1312,7 +1298,6 @@ DROP TABLE tab5;
SET GLOBAL innodb_cmp_per_index_enabled=0;
SET GLOBAL innodb_cmp_per_index_enabled=1;
-SET GLOBAL innodb_file_per_table=ON;
SET GLOBAL innodb_compression_level=default;
SET GLOBAL innodb_compression_failure_threshold_pct=default;
@@ -1351,7 +1336,6 @@ DROP TABLE tab5, test.tab5;
DROP DATABASE sb;
--echo # reset the flags
-eval SET GLOBAL innodb_file_per_table=default;
eval SET GLOBAL innodb_cmp_per_index_enabled=default;
--disable_query_log
eval SET GLOBAL innodb_compression_level=$innodb_compression_level;
diff --git a/mysql-test/suite/json/r/json_table.result b/mysql-test/suite/json/r/json_table.result
index 44957352865..900348d8a13 100644
--- a/mysql-test/suite/json/r/json_table.result
+++ b/mysql-test/suite/json/r/json_table.result
@@ -211,12 +211,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tt",
"access_type": "ALL",
+ "loops": 1,
"rows": 40,
+ "cost": "COST_REPLACED",
"filtered": 100,
"table_function": "json_table"
}
diff --git a/mysql-test/suite/json/r/json_table_mysql.result b/mysql-test/suite/json/r/json_table_mysql.result
index 2357d9d3cf0..0be40a8a5c8 100644
--- a/mysql-test/suite/json/r/json_table_mysql.result
+++ b/mysql-test/suite/json/r/json_table_mysql.result
@@ -189,12 +189,15 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
"table_name": "tt",
"access_type": "ALL",
+ "loops": 1,
"rows": 40,
+ "cost": "COST_REPLACED",
"filtered": 100,
"table_function": "json_table"
}
@@ -530,8 +533,7 @@ EXPLAIN SELECT * FROM t1 WHERE id IN
(id INT PATH '$')) AS jt);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED jt ALL NULL NULL NULL NULL 40 Table function: json_table
+1 PRIMARY jt ALL NULL NULL NULL NULL 40 Table function: json_table; Using where; FirstMatch(t1); Using join buffer (flat, BNL join)
DROP TABLE t1;
SELECT * FROM JSON_TABLE('"asdf"', '$' COLUMNS(
tm TIME PATH '$',
diff --git a/mysql-test/suite/json/t/json_table.test b/mysql-test/suite/json/t/json_table.test
index 05db8f66a59..982922ff595 100644
--- a/mysql-test/suite/json/t/json_table.test
+++ b/mysql-test/suite/json/t/json_table.test
@@ -69,6 +69,7 @@ insert into t1 select * from t1;
set @save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='firstmatch=off';
+--sorted_result
select * from
json_table('[{"color": "blue", "price": 50},
{"color": "red", "price": 100}]',
@@ -144,6 +145,7 @@ create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50
select * from v2;
drop view v2;
+--source include/explain-no-costs.inc
explain format=json select * from
json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
explain select * from
diff --git a/mysql-test/suite/json/t/json_table_mysql.test b/mysql-test/suite/json/t/json_table_mysql.test
index 9f77ad964f3..3b0fb556260 100644
--- a/mysql-test/suite/json/t/json_table_mysql.test
+++ b/mysql-test/suite/json/t/json_table_mysql.test
@@ -167,6 +167,7 @@ select * from
#eval $query;
#eval explain $query;
+--source include/explain-no-costs.inc
explain format=json
select * from
json_table(
diff --git a/mysql-test/suite/maria/crash-recursive.result b/mysql-test/suite/maria/crash-recursive.result
new file mode 100644
index 00000000000..998bb9e501b
--- /dev/null
+++ b/mysql-test/suite/maria/crash-recursive.result
@@ -0,0 +1,53 @@
+set @save_big_tables=@@big_tables;
+set big_tables=1;
+Warnings:
+Warning 1287 '@@big_tables' is deprecated and will be removed in a future release
+create table folks(id int, name char(32), dob date, father int, mother int);
+insert into folks values
+(100, 'Me', '2000-01-01', 20, 30),
+(20, 'Dad', '1970-02-02', 10, 9),
+(30, 'Mom', '1975-03-03', 8, 7),
+(10, 'Grandpa Bill', '1940-04-05', null, null),
+(9, 'Grandma Ann', '1941-10-15', null, null),
+(25, 'Uncle Jim', '1968-11-18', 8, 7),
+(98, 'Sister Amy', '2001-06-20', 20, 30),
+(7, 'Grandma Sally', '1943-08-23', null, 6),
+(8, 'Grandpa Ben', '1940-10-21', null, null),
+(6, 'Grandgrandma Martha', '1923-05-17', null, null),
+(67, 'Cousin Eddie', '1992-02-28', 25, 27),
+(27, 'Auntie Melinda', '1971-03-29', null, null);
+call mtr.add_suppression(".*marked as crashed.*");
+SET @saved_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,ha_rnd_next_error";
+SET @ha_rnd_next_error_counter=110;
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+select h.*, w.*
+from folks h, folks w, coupled_ancestors a
+where a.father = h.id AND a.mother = w.id
+union
+select h.*, w.*
+from folks v, folks h, folks w
+where v.name = 'Me' and
+(v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+select h_id, h_name, h_dob, h_father, h_mother
+from ancestor_couples
+union
+select w_id, w_name, w_dob, w_father, w_mother
+from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+from ancestor_couples;
+ERROR HY000: Table '(temporary)' is marked as crashed and should be repaired
+drop table folks;
+set big_tables=@save_big_tables;
+Warnings:
+Warning 1287 '@@big_tables' is deprecated and will be removed in a future release
+SET @@SESSION.debug_dbug=@saved_dbug;
diff --git a/mysql-test/suite/maria/crash-recursive.test b/mysql-test/suite/maria/crash-recursive.test
new file mode 100644
index 00000000000..1fa18ce85c0
--- /dev/null
+++ b/mysql-test/suite/maria/crash-recursive.test
@@ -0,0 +1,67 @@
+#
+# This test simulates an error in an Aria file discovered during a recursive SQL call.
+# The error handling causes the used join structures to be deleted, which caused crashes at
+# upper levels when trying to access structures that no longer exist.
+#
+
+--source include/have_debug_sync.inc
+--source include/not_embedded.inc
+
+set @save_big_tables=@@big_tables;
+set big_tables=1;
+
+create table folks(id int, name char(32), dob date, father int, mother int);
+
+insert into folks values
+(100, 'Me', '2000-01-01', 20, 30),
+(20, 'Dad', '1970-02-02', 10, 9),
+(30, 'Mom', '1975-03-03', 8, 7),
+(10, 'Grandpa Bill', '1940-04-05', null, null),
+(9, 'Grandma Ann', '1941-10-15', null, null),
+(25, 'Uncle Jim', '1968-11-18', 8, 7),
+(98, 'Sister Amy', '2001-06-20', 20, 30),
+(7, 'Grandma Sally', '1943-08-23', null, 6),
+(8, 'Grandpa Ben', '1940-10-21', null, null),
+(6, 'Grandgrandma Martha', '1923-05-17', null, null),
+(67, 'Cousin Eddie', '1992-02-28', 25, 27),
+(27, 'Auntie Melinda', '1971-03-29', null, null);
+
+
+call mtr.add_suppression(".*marked as crashed.*");
+SET @saved_dbug= @@SESSION.debug_dbug;
+SET SESSION debug_dbug="+d,ha_rnd_next_error";
+SET @ha_rnd_next_error_counter=110;
+
+let q=
+with recursive
+ancestor_couples(h_id, h_name, h_dob, h_father, h_mother,
+ w_id, w_name, w_dob, w_father, w_mother)
+as
+(
+ select h.*, w.*
+ from folks h, folks w, coupled_ancestors a
+ where a.father = h.id AND a.mother = w.id
+ union
+ select h.*, w.*
+ from folks v, folks h, folks w
+ where v.name = 'Me' and
+ (v.father = h.id AND v.mother= w.id)
+),
+coupled_ancestors (id, name, dob, father, mother)
+as
+(
+ select h_id, h_name, h_dob, h_father, h_mother
+ from ancestor_couples
+ union
+ select w_id, w_name, w_dob, w_father, w_mother
+ from ancestor_couples
+)
+select h_name, h_dob, w_name, w_dob
+ from ancestor_couples;
+
+--error ER_CRASHED_ON_USAGE
+eval $q;
+drop table folks;
+
+set big_tables=@save_big_tables;
+SET @@SESSION.debug_dbug=@saved_dbug;
\ No newline at end of file
diff --git a/mysql-test/suite/maria/icp.result b/mysql-test/suite/maria/icp.result
index 43ec6439144..a421ba6d3ea 100644
--- a/mysql-test/suite/maria/icp.result
+++ b/mysql-test/suite/maria/icp.result
@@ -409,7 +409,7 @@ WHERE (pk BETWEEN 4 AND 5 OR pk < 2) AND c1 < 240
ORDER BY c1
LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,k1 PRIMARY 4 NULL 3 Using index condition; Using where; Rowid-ordered scan; Using filesort
+1 SIMPLE t1 range PRIMARY,k1 k1 5 NULL 4 Using index condition; Using where
DROP TABLE t1;
#
#
@@ -450,9 +450,10 @@ c1 INT NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t1 VALUES (1,9),(2,7),(3,6),(4,3),(5,1);
+insert into t1 select seq,seq from seq_100_to_110;
EXPLAIN SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 5 Using where
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 16 Using index condition; Rowid-ordered scan
SET SESSION optimizer_switch='index_condition_pushdown=off';
SELECT pk, c1 FROM t1 WHERE (pk<3 or pk>3);
pk c1
@@ -460,6 +461,17 @@ pk c1
2 7
4 3
5 1
+100 100
+101 101
+102 102
+103 103
+104 104
+105 105
+106 106
+107 107
+108 108
+109 109
+110 110
DROP TABLE t1;
set optimizer_switch= @save_optimizer_switch;
#
@@ -677,7 +689,6 @@ DROP TABLE t1;
#
CREATE TABLE t1 (b int NOT NULL, c int, a varchar(1024), PRIMARY KEY (b));
INSERT INTO t1 VALUES (1,4,'Ill');
-insert into t1 select seq+100,5,seq from seq_1_to_100;
CREATE TABLE t2 (a varchar(1024), KEY (a(512)));
INSERT INTO t2 VALUES
('Ill'), ('eckqzsflbzaffti'), ('w'), ('she'), ('gxbwypqtjzwywwer'), ('w');
@@ -687,8 +698,8 @@ EXPLAIN
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL # Using where; Using filesort
-1 SIMPLE t2 ref a a 515 test.t1.a # Using where
+1 SIMPLE t1 system PRIMARY NULL NULL NULL #
+1 SIMPLE t2 ref a a 515 const # Using where
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
@@ -698,8 +709,8 @@ EXPLAIN
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL # Using where; Using filesort
-1 SIMPLE t2 ref a a 515 test.t1.a # Using where
+1 SIMPLE t1 system PRIMARY NULL NULL NULL #
+1 SIMPLE t2 ref a a 515 const # Using where
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND (t1.b<0 OR t1.b>0)
HAVING t1.c != 5 ORDER BY t1.c;
b c
@@ -811,6 +822,8 @@ test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
+set @save_optimizer_where_cost=@@optimizer_where_cost;
+set @@optimizer_where_cost=1;
EXPLAIN
SELECT COUNT(*) FROM t1 AS t, t2
WHERE c = g
@@ -834,6 +847,7 @@ OR a = 0 AND h < 'z' );
COUNT(*)
1478
SET optimizer_switch=@save_optimizer_switch;
+set @@optimizer_where_cost=@save_optimizer_where_cost;
DROP TABLE t1,t2;
# check "Handler_pushed" status varuiables
CREATE TABLE t1 (
diff --git a/mysql-test/suite/maria/mrr.result b/mysql-test/suite/maria/mrr.result
index 066f1a50aab..2c8c289f4a6 100644
--- a/mysql-test/suite/maria/mrr.result
+++ b/mysql-test/suite/maria/mrr.result
@@ -364,9 +364,9 @@ EXPLAIN
SELECT COUNT(t1.v) FROM t1, t2 IGNORE INDEX (idx), t3 IGNORE INDEX (idx)
WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL idx 7 NULL 15 Using index
-1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 16 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE t3 ALL PRIMARY NULL NULL NULL 25 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 16 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 index NULL idx 7 NULL 15 Using index; Using join buffer (flat, BNL join)
+1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 25 Using index condition; Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
SELECT COUNT(t1.v) FROM t1, t2, t3
WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0;
COUNT(t1.v)
@@ -375,9 +375,9 @@ EXPLAIN
SELECT COUNT(t1.v) FROM t1, t2, t3
WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL idx 7 NULL 15 Using index
-1 SIMPLE t2 ALL PRIMARY,idx NULL NULL NULL 16 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 range PRIMARY,idx PRIMARY 4 NULL 16 Using index condition; Rowid-ordered scan
1 SIMPLE t3 ref PRIMARY,idx idx 3 test.t2.v 2 Using index condition; Using where
+1 SIMPLE t1 index NULL idx 7 NULL 15 Using index; Using join buffer (flat, BNL join)
set join_cache_level=@save_join_cache_level;
DROP TABLE t1,t2,t3;
#
@@ -405,7 +405,7 @@ WHERE
table1.col_varchar_1024_latin1_key = table2.col_varchar_10_latin1 AND table1.pk<>0 ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE table2 ALL NULL NULL NULL NULL 2 Using where
-1 SIMPLE table1 ref PRIMARY,col_varchar_1024_latin1_key col_varchar_1024_latin1_key 1027 test.table2.col_varchar_10_latin1 2 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE table1 ref PRIMARY,col_varchar_1024_latin1_key col_varchar_1024_latin1_key 1027 test.table2.col_varchar_10_latin1 1 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT count(*)
FROM t1 AS table1, t2 AS table2
WHERE
diff --git a/mysql-test/suite/mariabackup/mdev-14447.result b/mysql-test/suite/mariabackup/mdev-14447.result
index 16d3ab561f6..5db4693d75f 100644
--- a/mysql-test/suite/mariabackup/mdev-14447.result
+++ b/mysql-test/suite/mariabackup/mdev-14447.result
@@ -1,5 +1,7 @@
call mtr.add_suppression("InnoDB: New log files created");
SET GLOBAL innodb_file_per_table=0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t(a varchar(40) PRIMARY KEY, b varchar(40), c varchar(40), d varchar(40), index(b,c,d)) ENGINE INNODB;
# Create full backup , modify table, then create incremental/differential backup
SET debug_dbug='+d,skip_page_checksum',foreign_key_checks=0,unique_checks=0;
diff --git a/mysql-test/suite/mariabackup/xb_compressed_encrypted.opt b/mysql-test/suite/mariabackup/xb_compressed_encrypted.opt
index 4105fecfe58..52ef47f3fca 100644
--- a/mysql-test/suite/mariabackup/xb_compressed_encrypted.opt
+++ b/mysql-test/suite/mariabackup/xb_compressed_encrypted.opt
@@ -5,4 +5,3 @@
--loose-file-key-management
--loose-file-key-management-filename=$MYSQL_TEST_DIR/std_data/logkey.txt
--innodb_strict_mode
---innodb_file_per_table
diff --git a/mysql-test/suite/mariabackup/xb_fulltext_encrypted.opt b/mysql-test/suite/mariabackup/xb_fulltext_encrypted.opt
index b3ef366361a..e3a9e5a437f 100644
--- a/mysql-test/suite/mariabackup/xb_fulltext_encrypted.opt
+++ b/mysql-test/suite/mariabackup/xb_fulltext_encrypted.opt
@@ -1,6 +1,5 @@
--plugin-load-add=$FILE_KEY_MANAGEMENT_SO
--innodb_strict_mode
---innodb_file_per_table
--innodb-encryption-rotate-key-age=2
--innodb-encryption-threads=4
--innodb-tablespaces-encryption
diff --git a/mysql-test/suite/mtr/t/simple,c2,s1.rdiff b/mysql-test/suite/mtr/t/simple,c2,s1.rdiff
index 3023756aa8a..91f800a6211 100644
--- a/mysql-test/suite/mtr/t/simple,c2,s1.rdiff
+++ b/mysql-test/suite/mtr/t/simple,c2,s1.rdiff
@@ -1,5 +1,5 @@
---- suite/mtr/t/simple.result 2012-02-04 12:13:41.000000000 +0100
-+++ suite/mtr/t/simple,infile,verbose.reject 2012-02-04 12:16:10.000000000 +0100
+--- suite/mtr/t/simple.result
++++ suite/mtr/t/simple,infile,verbose.reject
@@ -3,7 +3,7 @@
proxy
select @@local_infile;
diff --git a/mysql-test/suite/mtr/t/simple,s2,c2.rdiff b/mysql-test/suite/mtr/t/simple,s2,c2.rdiff
index a9b9b56ef1c..a4c9ab968c5 100644
--- a/mysql-test/suite/mtr/t/simple,s2,c2.rdiff
+++ b/mysql-test/suite/mtr/t/simple,s2,c2.rdiff
@@ -1,5 +1,5 @@
---- suite/mtr/t/simple,old.result 2012-02-04 12:13:25.000000000 +0100
-+++ suite/mtr/t/simple,old,infile.reject 2012-02-04 12:13:59.000000000 +0100
+--- suite/mtr/t/simple,old.result
++++ suite/mtr/t/simple,old,infile.reject
@@ -3,7 +3,7 @@
proxy
select @@local_infile;
diff --git a/mysql-test/suite/parts/inc/partition_decimal.inc b/mysql-test/suite/parts/inc/partition_decimal.inc
index 93e9e48c9c9..4ad2a000355 100644
--- a/mysql-test/suite/parts/inc/partition_decimal.inc
+++ b/mysql-test/suite/parts/inc/partition_decimal.inc
@@ -6,9 +6,11 @@ partition pa3 max_rows=30 min_rows=4,
partition pa4 max_rows=40 min_rows=2);
show create table t1;
insert into t1 values (999999.9999), (-999999.9999), (123456.7899), (-123456.7899), (-1.5), (1), (0), (-1), (1.5), (1234.567), (-1234.567);
+--sorted_result
select * from t1;
select * from t1 where a=1234.567;
delete from t1 where a=1234.567;
+--sorted_result
select * from t1;
drop table t1;
@@ -16,9 +18,11 @@ eval create table t2 (a decimal(18,9) not null, primary key(a)) engine=$engine
partition by key (a) partitions 10;
show create table t2;
insert into t2 values (999999999.999999999), (-999999999.999999999), (-1.5), (-1), (0), (1.5), (1234.567), (-1234.567);
+--sorted_result
select * from t2;
select * from t2 where a=1234.567;
delete from t2 where a=1234.567;
+--sorted_result
select * from t2;
delete from t2;
let $count=$maxrows;
diff --git a/mysql-test/suite/parts/inc/partition_double.inc b/mysql-test/suite/parts/inc/partition_double.inc
index 9e43887be09..dd2fd10090d 100644
--- a/mysql-test/suite/parts/inc/partition_double.inc
+++ b/mysql-test/suite/parts/inc/partition_double.inc
@@ -6,9 +6,11 @@ partition pa3 max_rows=30 min_rows=4,
partition pa4 max_rows=40 min_rows=2);
show create table t1;
insert into t1 values (-2.2250738585072014E+208), (-2.2250738585072014E-208), (-1.5), (-1), (0), (1.5), (1234.567), (2.2250738585072014E+208);
+--sorted_result
select * from t1;
select * from t1 where a=1.5;
delete from t1 where a=1.5;
+--sorted_result
select * from t1;
drop table t1;
@@ -16,9 +18,11 @@ eval create table t2 (a double not null, primary key(a)) engine=$engine
partition by key (a) partitions 10;
show create table t2;
insert into t2 values (-2.2250738585072014E+208), (-2.2250738585072014E-208), (-1.5), (-1), (0), (1.5), (1234.567), (2.2250738585072014E+208);
+--sorted_result
select * from t2;
select * from t2 where a=1234.567;
delete from t2 where a=1234.567;
+--sorted_result
select * from t2;
delete from t2;
let $count=$maxrows;
diff --git a/mysql-test/suite/parts/inc/partition_key_16col.inc b/mysql-test/suite/parts/inc/partition_key_16col.inc
index 988dc4554ab..e7917451f1f 100644
--- a/mysql-test/suite/parts/inc/partition_key_16col.inc
+++ b/mysql-test/suite/parts/inc/partition_key_16col.inc
@@ -10,6 +10,7 @@ insert into t1 values
('1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127,'1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127, 'liuugbzvdmrlti b itiortudirtfgtibm dfi'),
('1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124,'1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124, 'd,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr'),
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m');
+--sorted_result
select * from t1;
select * from t1 where a<19851231;
drop table t1;
diff --git a/mysql-test/suite/parts/inc/partition_key_32col.inc b/mysql-test/suite/parts/inc/partition_key_32col.inc
index 0acedefaa8e..880751db52a 100644
--- a/mysql-test/suite/parts/inc/partition_key_32col.inc
+++ b/mysql-test/suite/parts/inc/partition_key_32col.inc
@@ -28,6 +28,7 @@ insert into t1 values
('1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127,'1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127, '1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127, '1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127, 'liuugbzvdmrlti b itiortudirtfgtibm dfi'),
('1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124, '1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124, '1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124, '1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124, 'd,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr'),
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m');
+--sorted_result
select * from t1;
select * from t1 where a<19851231;
drop table t1;
diff --git a/mysql-test/suite/parts/inc/partition_key_4col.inc b/mysql-test/suite/parts/inc/partition_key_4col.inc
index a94ab581620..706c8addc7e 100644
--- a/mysql-test/suite/parts/inc/partition_key_4col.inc
+++ b/mysql-test/suite/parts/inc/partition_key_4col.inc
@@ -10,6 +10,7 @@ insert into t1 values
('1983-12-31', 'cdef', 'srtbvsr', 'w'),
('1980-10-14', 'fgbbd', 'dtzndtz', 'w'),
('2000-06-15', 'jukg','zikhuk','m');
+--sorted_result
select * from t1;
select * from t1 where a<19851231;
drop table t1;
diff --git a/mysql-test/suite/parts/inc/partition_key_8col.inc b/mysql-test/suite/parts/inc/partition_key_8col.inc
index fcbab7c355d..1e49ee0b342 100644
--- a/mysql-test/suite/parts/inc/partition_key_8col.inc
+++ b/mysql-test/suite/parts/inc/partition_key_8col.inc
@@ -10,6 +10,7 @@ insert into t1 values
('1983-12-31', 'cdef', 'srtbvsr', 'w', 45634, 13452.56, 3452346456, 127, 'liuugbzvdmrlti b itiortudirtfgtibm dfi'),
('1980-10-14', 'fgbbd', 'dtzndtz', 'w', 67856, 5463354.67, 3567845333, 124, 'd,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr'),
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m' );
+--sorted_result
select * from t1;
select * from t1 where a<19851231;
drop table t1;
diff --git a/mysql-test/suite/parts/inc/partition_time.inc b/mysql-test/suite/parts/inc/partition_time.inc
index 674fe546a1f..5f9466ba64e 100644
--- a/mysql-test/suite/parts/inc/partition_time.inc
+++ b/mysql-test/suite/parts/inc/partition_time.inc
@@ -57,7 +57,7 @@ dec $count;
commit;
--enable_query_log
select count(*) from t3;
-select * from t3;
+select a, second(a), if(second(a)<16,1,if(second(a)<31,2,if(second(a)<45,3,4))) from t3;
drop table t3;
eval create table t4 (a time not null, primary key(a)) engine=$engine
diff --git a/mysql-test/suite/parts/inc/partition_timestamp.inc b/mysql-test/suite/parts/inc/partition_timestamp.inc
index fb1bf391999..53334c13af0 100644
--- a/mysql-test/suite/parts/inc/partition_timestamp.inc
+++ b/mysql-test/suite/parts/inc/partition_timestamp.inc
@@ -6,9 +6,11 @@ partition pa3 max_rows=30 min_rows=4,
partition pa4 max_rows=40 min_rows=2);
show create table t1;
insert into t1 values ('1975-01-01 21:21:21'), ('2020-12-31 12:10:30'), ('1980-10-14 03:03'), ('2000-06-15 23:59');
+--sorted_result
select * from t1;
select * from t1 where a=19801014030300;
delete from t1 where a=19801014030300;
+--sorted_result
select * from t1;
drop table t1;
@@ -16,9 +18,11 @@ eval create table t2 (a timestamp not null DEFAULT CURRENT_TIMESTAMP ON UPDATE C
partition by key (a) partitions 12;
show create table t2;
insert into t2 values ('1975-01-01 0:1:1'), ('2020-12-31 10:11:12'), ('1980-10-14 13:14:15'), ('2000-06-15 14:15:16');
+--sorted_result
select * from t2;
select * from t2 where a='1980-10-14 13:14:15';
delete from t2 where a='1980-10-14 13:14:15';
+--sorted_result
select * from t2;
delete from t2;
let $count=59;
@@ -33,6 +37,7 @@ dec $count;
commit;
--enable_query_log
select count(*) from t2;
+--sorted_result
select * from t2;
drop table t2;
diff --git a/mysql-test/suite/parts/r/alter_data_directory_innodb.result b/mysql-test/suite/parts/r/alter_data_directory_innodb.result
index 8c07093f127..e8f976372d6 100644
--- a/mysql-test/suite/parts/r/alter_data_directory_innodb.result
+++ b/mysql-test/suite/parts/r/alter_data_directory_innodb.result
@@ -40,6 +40,8 @@ t CREATE TABLE `t` (
PARTITION `p2` DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_here' ENGINE = InnoDB)
SET @TMP = @@GLOBAL.INNODB_FILE_PER_TABLE;
SET GLOBAL INNODB_FILE_PER_TABLE=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t ADD PRIMARY KEY pk(a), ALGORITHM=INPLACE;
Warnings:
Warning 1280 Name 'pk' ignored for PRIMARY key.
@@ -53,6 +55,8 @@ t CREATE TABLE `t` (
(PARTITION `p1` DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_here' ENGINE = InnoDB,
PARTITION `p2` DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_here' ENGINE = InnoDB)
SET GLOBAL INNODB_FILE_PER_TABLE=@TMP;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t REORGANIZE PARTITION p1,p2 INTO (
PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_somewhere_else/' ENGINE = INNODB,
PARTITION p2 DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_somewhere_else/' ENGINE = INNODB
diff --git a/mysql-test/suite/parts/r/longname.result b/mysql-test/suite/parts/r/longname.result
index eb850d6f16c..8524eb8499d 100644
--- a/mysql-test/suite/parts/r/longname.result
+++ b/mysql-test/suite/parts/r/longname.result
@@ -31,6 +31,8 @@ PARTITION pmax VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
ERROR HY000: The path specified for @0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@0n@... is too long
SET @file_per_table=@@GLOBAL.innodb_file_per_table;
SET GLOBAL innodb_file_per_table=0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE mysqltest1.t1 (a INT) ENGINE=INNODB
PARTITION BY RANGE (a) SUBPARTITION BY HASH(a)
(PARTITION `$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$`
@@ -41,6 +43,8 @@ SUBPARTITION
`0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef`)
);
SET GLOBAL innodb_file_per_table=@file_per_table;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SHOW CREATE TABLE mysqltest1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/parts/r/optimizer.result b/mysql-test/suite/parts/r/optimizer.result
index 42d85dbbd39..95f0e561b0a 100644
--- a/mysql-test/suite/parts/r/optimizer.result
+++ b/mysql-test/suite/parts/r/optimizer.result
@@ -22,10 +22,10 @@ INSERT INTO t2 SELECT * FROM t1;
# plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index
+1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
@@ -33,7 +33,7 @@ a MAX(b)
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
-Handler_read_key 2
+Handler_read_key 4
FLUSH status;
SELECT a, MAX(b) FROM t2 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
@@ -41,5 +41,5 @@ a MAX(b)
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
-Handler_read_key 2
+Handler_read_key 4
DROP TABLE t1, t2;
diff --git a/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result b/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result
index 9d05d04ccca..6d361bb1465 100644
--- a/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result
+++ b/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result
@@ -8,6 +8,8 @@ SET SESSION innodb_strict_mode = ON;
# InnoDB only supports DATA DIRECTORY with innodb_file_per_table=ON
#
SET GLOBAL innodb_file_per_table = OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1 (c1 INT) ENGINE = InnoDB
PARTITION BY HASH (c1) (
PARTITION p0
@@ -36,6 +38,8 @@ Error 6 Error on delete of 'MYSQLD_DATADIR/test/t1.par' (Errcode: 2 "No such fil
#
SET SESSION innodb_strict_mode = OFF;
SET GLOBAL innodb_file_per_table = ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1 (c1 INT) ENGINE = InnoDB
PARTITION BY HASH (c1)
(PARTITION p0
@@ -134,6 +138,8 @@ DROP TABLE t1;
# properly when used with DATA DIRECTORY
#
SET GLOBAL innodb_file_per_table = ON;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
CREATE TABLE t1
(
myid INT(11) NOT NULL,
@@ -326,4 +332,6 @@ DROP TABLE t1, t2;
# Cleanup
#
SET GLOBAL innodb_file_per_table=@file_per_table;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SET SESSION innodb_strict_mode=@strict_mode;
diff --git a/mysql-test/suite/parts/r/partition_char_innodb.result b/mysql-test/suite/parts/r/partition_char_innodb.result
index 3131ba58cdf..da6f9bb6c27 100644
--- a/mysql-test/suite/parts/r/partition_char_innodb.result
+++ b/mysql-test/suite/parts/r/partition_char_innodb.result
Binary files differ
diff --git a/mysql-test/suite/parts/r/partition_datetime_innodb.result b/mysql-test/suite/parts/r/partition_datetime_innodb.result
index 8779bfeafa7..f00b7a3e478 100644
--- a/mysql-test/suite/parts/r/partition_datetime_innodb.result
+++ b/mysql-test/suite/parts/r/partition_datetime_innodb.result
@@ -145,19 +145,19 @@ t1 CREATE TABLE `t1` (
insert into t1 values ('1975-01-01'), ('2020-12-31'), ('1980-10-14'), ('2000-06-15');
select * from t1;
a
-1975-01-01
1980-10-14
-2000-06-15
2020-12-31
+1975-01-01
+2000-06-15
select * from t1 where a=19801014;
a
1980-10-14
delete from t1 where a=19801014;
select * from t1;
a
+2020-12-31
1975-01-01
2000-06-15
-2020-12-31
drop table t1;
create table t2 (a date not null, primary key(a)) engine='InnoDB'
partition by key (a) partitions 12;
@@ -174,8 +174,8 @@ select * from t2;
a
1975-01-01
1980-10-14
-2000-06-15
2020-12-31
+2000-06-15
select * from t2 where a='1980-10-14';
a
1980-10-14
@@ -183,8 +183,8 @@ delete from t2 where a='1980-10-14';
select * from t2;
a
1975-01-01
-2000-06-15
2020-12-31
+2000-06-15
delete from t2;
28 inserts;
select count(*) from t2;
@@ -192,90 +192,90 @@ count(*)
84
select * from t2;
a
-1970-01-01
-1970-01-02
-1970-01-03
1970-01-04
-1970-01-05
-1970-01-06
-1970-01-07
-1970-01-08
-1970-01-09
-1970-01-10
-1970-01-11
1970-01-12
-1970-01-13
1970-01-14
-1970-01-15
-1970-01-16
-1970-01-17
-1970-01-18
-1970-01-19
-1970-01-20
-1970-01-21
-1970-01-22
-1970-01-23
1970-01-24
-1970-01-25
-1970-01-26
-1970-01-27
1970-01-28
-1970-02-01
1970-02-02
-1970-02-03
1970-02-04
-1970-02-05
-1970-02-06
-1970-02-07
-1970-02-08
-1970-02-09
1970-02-10
-1970-02-11
-1970-02-12
-1970-02-13
1970-02-14
-1970-02-15
1970-02-16
-1970-02-17
-1970-02-18
-1970-02-19
-1970-02-20
-1970-02-21
-1970-02-22
-1970-02-23
-1970-02-24
-1970-02-25
-1970-02-26
-1970-02-27
-1970-02-28
-1970-03-01
-1970-03-02
-1970-03-03
1970-03-04
-1970-03-05
1970-03-06
+1970-03-12
+1970-01-13
+1970-01-27
+1970-02-03
+1970-02-09
+1970-02-13
+1970-02-21
+1970-03-05
1970-03-07
-1970-03-08
1970-03-09
-1970-03-10
-1970-03-11
-1970-03-12
-1970-03-13
-1970-03-14
1970-03-15
+1970-03-23
+1970-01-06
+1970-01-08
+1970-01-16
+1970-01-18
+1970-01-22
+1970-02-06
+1970-02-12
+1970-02-20
+1970-02-22
+1970-02-28
+1970-03-02
+1970-03-08
1970-03-16
-1970-03-17
-1970-03-18
-1970-03-19
1970-03-20
-1970-03-21
1970-03-22
-1970-03-23
1970-03-24
+1970-01-05
+1970-01-07
+1970-01-11
+1970-01-15
+1970-01-21
+1970-02-01
+1970-02-11
+1970-02-15
+1970-02-19
+1970-03-03
+1970-03-11
+1970-03-13
+1970-03-17
+1970-03-19
1970-03-25
-1970-03-26
1970-03-27
+1970-01-02
+1970-01-10
+1970-01-20
+1970-01-26
+1970-02-08
+1970-02-18
+1970-02-24
+1970-02-26
+1970-03-10
+1970-03-14
+1970-03-18
+1970-03-26
1970-03-28
+1970-01-01
+1970-01-03
+1970-01-09
+1970-01-17
+1970-01-19
+1970-01-23
+1970-01-25
+1970-02-05
+1970-02-07
+1970-02-17
+1970-02-23
+1970-02-25
+1970-02-27
+1970-03-01
+1970-03-21
drop table t2;
create table t3 (a date not null, primary key(a)) engine='InnoDB'
partition by range (month(a)) subpartition by key (a)
@@ -304,18 +304,18 @@ count(*)
12
select * from t3;
a
-1970-01-01
1970-02-01
+1970-01-01
1970-03-01
1970-04-01
1970-05-01
1970-06-01
+1970-09-01
1970-07-01
1970-08-01
-1970-09-01
+1970-12-01
1970-10-01
1970-11-01
-1970-12-01
drop table t3;
create table t4 (a date not null, primary key(a)) engine='InnoDB'
partition by list (month(a)) subpartition by key (a)
@@ -344,18 +344,18 @@ count(*)
12
select * from t4;
a
-1970-01-01
1970-02-01
+1970-01-01
1970-03-01
1970-04-01
1970-05-01
1970-06-01
+1970-09-01
1970-07-01
1970-08-01
-1970-09-01
+1970-12-01
1970-10-01
1970-11-01
-1970-12-01
drop table t4;
create table t1 (a time not null, primary key(a)) engine='InnoDB'
partition by key (a) (
@@ -378,18 +378,18 @@ insert into t1 values ('21:21:21'), ('12:10:30'), ('03:03:03'), ('23:59');
select * from t1;
a
03:03:03
-12:10:30
21:21:21
23:59:00
+12:10:30
select * from t1 where a=030303;
a
03:03:03
delete from t1 where a=030303;
select * from t1;
a
-12:10:30
21:21:21
23:59:00
+12:10:30
drop table t1;
create table t2 (a time not null, primary key(a)) engine='InnoDB'
partition by key (a) partitions 12;
@@ -404,19 +404,19 @@ PARTITIONS 12
insert into t2 values ('0:1:1'), ('10:11:12'), ('13:14:15'), ('14:15:16');
select * from t2;
a
-00:01:01
10:11:12
13:14:15
14:15:16
+00:01:01
select * from t2 where a='13:14:15';
a
13:14:15
delete from t2 where a='13:14:15';
select * from t2;
a
-00:01:01
10:11:12
14:15:16
+00:01:01
delete from t2;
59 inserts;
select count(*) from t2;
@@ -424,65 +424,65 @@ count(*)
59
select * from t2;
a
-00:01:01
-00:01:02
-00:01:03
-00:01:04
+00:01:15
+00:01:23
+00:01:39
+00:01:47
+00:01:59
+00:01:16
+00:01:24
+00:01:40
+00:01:48
00:01:05
-00:01:06
-00:01:07
-00:01:08
-00:01:09
-00:01:10
-00:01:11
-00:01:12
00:01:13
+00:01:21
+00:01:37
+00:01:45
+00:01:57
00:01:14
-00:01:15
-00:01:16
-00:01:17
-00:01:18
+00:01:22
+00:01:38
+00:01:46
+00:01:58
+00:01:03
+00:01:11
00:01:19
+00:01:35
+00:01:43
+00:01:55
+00:01:04
+00:01:12
00:01:20
-00:01:21
-00:01:22
-00:01:23
-00:01:24
-00:01:25
-00:01:26
-00:01:27
-00:01:28
+00:01:36
+00:01:44
+00:01:56
+00:01:01
+00:01:09
00:01:29
-00:01:30
-00:01:31
-00:01:32
00:01:33
-00:01:34
-00:01:35
-00:01:36
-00:01:37
-00:01:38
-00:01:39
-00:01:40
00:01:41
+00:01:53
+00:01:02
+00:01:10
+00:01:18
+00:01:34
00:01:42
-00:01:43
-00:01:44
-00:01:45
-00:01:46
-00:01:47
-00:01:48
-00:01:49
-00:01:50
+00:01:54
+00:01:07
+00:01:27
+00:01:31
00:01:51
+00:01:08
+00:01:28
+00:01:32
00:01:52
-00:01:53
-00:01:54
-00:01:55
-00:01:56
-00:01:57
-00:01:58
-00:01:59
+00:01:17
+00:01:25
+00:01:49
+00:01:06
+00:01:26
+00:01:30
+00:01:50
drop table t2;
create table t3 (a time not null, primary key(a)) engine='InnoDB'
partition by range (second(a)) subpartition by key (a)
@@ -509,67 +509,67 @@ SUBPARTITIONS 3
select count(*) from t3;
count(*)
59
-select * from t3;
-a
-10:00:01
-10:00:02
-10:00:03
-10:00:04
-10:00:05
-10:00:06
-10:00:07
-10:00:08
-10:00:09
-10:00:10
-10:00:11
-10:00:12
-10:00:13
-10:00:14
-10:00:15
-10:00:16
-10:00:17
-10:00:18
-10:00:19
-10:00:20
-10:00:21
-10:00:22
-10:00:23
-10:00:24
-10:00:25
-10:00:26
-10:00:27
-10:00:28
-10:00:29
-10:00:30
-10:00:31
-10:00:32
-10:00:33
-10:00:34
-10:00:35
-10:00:36
-10:00:37
-10:00:38
-10:00:39
-10:00:40
-10:00:41
-10:00:42
-10:00:43
-10:00:44
-10:00:45
-10:00:46
-10:00:47
-10:00:48
-10:00:49
-10:00:50
-10:00:51
-10:00:52
-10:00:53
-10:00:54
-10:00:55
-10:00:56
-10:00:57
-10:00:58
-10:00:59
+select a, second(a), if(second(a)<16,1,if(second(a)<31,2,if(second(a)<45,3,4))) from t3;
+a second(a) if(second(a)<16,1,if(second(a)<31,2,if(second(a)<45,3,4)))
+10:00:01 1 1
+10:00:06 6 1
+10:00:07 7 1
+10:00:14 14 1
+10:00:15 15 1
+10:00:02 2 1
+10:00:03 3 1
+10:00:08 8 1
+10:00:09 9 1
+10:00:04 4 1
+10:00:05 5 1
+10:00:10 10 1
+10:00:11 11 1
+10:00:12 12 1
+10:00:13 13 1
+10:00:20 20 2
+10:00:21 21 2
+10:00:26 26 2
+10:00:27 27 2
+10:00:16 16 2
+10:00:17 17 2
+10:00:22 22 2
+10:00:23 23 2
+10:00:28 28 2
+10:00:29 29 2
+10:00:18 18 2
+10:00:19 19 2
+10:00:24 24 2
+10:00:25 25 2
+10:00:30 30 2
+10:00:32 32 3
+10:00:33 33 3
+10:00:35 35 3
+10:00:40 40 3
+10:00:41 41 3
+10:00:34 34 3
+10:00:36 36 3
+10:00:37 37 3
+10:00:42 42 3
+10:00:43 43 3
+10:00:31 31 3
+10:00:38 38 3
+10:00:39 39 3
+10:00:44 44 3
+10:00:45 45 4
+10:00:46 46 4
+10:00:48 48 4
+10:00:49 49 4
+10:00:54 54 4
+10:00:55 55 4
+10:00:50 50 4
+10:00:51 51 4
+10:00:56 56 4
+10:00:57 57 4
+10:00:59 59 4
+10:00:47 47 4
+10:00:52 52 4
+10:00:53 53 4
+10:00:58 58 4
drop table t3;
create table t4 (a time not null, primary key(a)) engine='InnoDB'
partition by list (second(a)) subpartition by key (a)
@@ -599,64 +599,64 @@ count(*)
select * from t4;
a
10:00:01
-10:00:02
-10:00:03
-10:00:04
-10:00:05
10:00:06
10:00:07
+10:00:14
+10:00:15
+10:00:02
+10:00:03
10:00:08
10:00:09
+10:00:04
+10:00:05
10:00:10
10:00:11
10:00:12
10:00:13
-10:00:14
-10:00:15
-10:00:16
-10:00:17
-10:00:18
-10:00:19
10:00:20
10:00:21
-10:00:22
-10:00:23
-10:00:24
-10:00:25
10:00:26
10:00:27
+10:00:16
+10:00:17
+10:00:22
+10:00:23
10:00:28
10:00:29
+10:00:18
+10:00:19
+10:00:24
+10:00:25
10:00:30
-10:00:31
10:00:32
10:00:33
-10:00:34
10:00:35
-10:00:36
-10:00:37
-10:00:38
-10:00:39
10:00:40
10:00:41
+10:00:34
+10:00:36
+10:00:37
10:00:42
10:00:43
+10:00:31
+10:00:38
+10:00:39
10:00:44
10:00:45
10:00:46
-10:00:47
10:00:48
10:00:49
-10:00:50
-10:00:51
-10:00:52
-10:00:53
10:00:54
10:00:55
+10:00:50
+10:00:51
10:00:56
10:00:57
-10:00:58
10:00:59
+10:00:47
+10:00:52
+10:00:53
+10:00:58
drop table t4;
create table t1 (a datetime not null, primary key(a)) engine='InnoDB'
partition by key (a) (
@@ -679,9 +679,9 @@ insert into t1 values ('1975-01-01 21:21:21'), ('2020-12-31 12:10:30'), ('1980-1
select * from t1;
a
1975-01-01 21:21:21
-1980-10-14 03:03:00
2000-06-15 23:59:00
2020-12-31 12:10:30
+1980-10-14 03:03:00
select * from t1 where a=19801014030300;
a
1980-10-14 03:03:00
@@ -705,19 +705,19 @@ PARTITIONS 12
insert into t2 values ('1975-01-01 0:1:1'), ('2020-12-31 10:11:12'), ('1980-10-14 13:14:15'), ('2000-06-15 14:15:16');
select * from t2;
a
-1975-01-01 00:01:01
-1980-10-14 13:14:15
-2000-06-15 14:15:16
2020-12-31 10:11:12
+2000-06-15 14:15:16
+1980-10-14 13:14:15
+1975-01-01 00:01:01
select * from t2 where a='1980-10-14 13:14:15';
a
1980-10-14 13:14:15
delete from t2 where a='1980-10-14 13:14:15';
select * from t2;
a
-1975-01-01 00:01:01
-2000-06-15 14:15:16
2020-12-31 10:11:12
+2000-06-15 14:15:16
+1975-01-01 00:01:01
delete from t2;
59 inserts;
select count(*) from t2;
@@ -725,65 +725,65 @@ count(*)
59
select * from t2;
a
-1970-01-01 00:00:01
-1970-01-01 00:00:02
-1970-01-01 00:00:03
-1970-01-01 00:00:04
-1970-01-01 00:00:05
-1970-01-01 00:00:06
-1970-01-01 00:00:07
-1970-01-01 00:00:08
1970-01-01 00:00:09
-1970-01-01 00:00:10
-1970-01-01 00:00:11
-1970-01-01 00:00:12
1970-01-01 00:00:13
-1970-01-01 00:00:14
-1970-01-01 00:00:15
-1970-01-01 00:00:16
-1970-01-01 00:00:17
-1970-01-01 00:00:18
1970-01-01 00:00:19
-1970-01-01 00:00:20
-1970-01-01 00:00:21
-1970-01-01 00:00:22
1970-01-01 00:00:23
+1970-01-01 00:00:33
+1970-01-01 00:00:37
+1970-01-01 00:00:51
+1970-01-01 00:00:59
+1970-01-01 00:00:04
+1970-01-01 00:00:10
+1970-01-01 00:00:14
1970-01-01 00:00:24
-1970-01-01 00:00:25
-1970-01-01 00:00:26
-1970-01-01 00:00:27
1970-01-01 00:00:28
-1970-01-01 00:00:29
-1970-01-01 00:00:30
-1970-01-01 00:00:31
-1970-01-01 00:00:32
-1970-01-01 00:00:33
1970-01-01 00:00:34
-1970-01-01 00:00:35
-1970-01-01 00:00:36
-1970-01-01 00:00:37
1970-01-01 00:00:38
+1970-01-01 00:00:48
+1970-01-01 00:00:52
+1970-01-01 00:00:56
+1970-01-01 00:00:03
+1970-01-01 00:00:07
+1970-01-01 00:00:17
+1970-01-01 00:00:21
+1970-01-01 00:00:27
+1970-01-01 00:00:31
+1970-01-01 00:00:35
1970-01-01 00:00:39
-1970-01-01 00:00:40
1970-01-01 00:00:41
-1970-01-01 00:00:42
-1970-01-01 00:00:43
-1970-01-01 00:00:44
1970-01-01 00:00:45
+1970-01-01 00:00:08
+1970-01-01 00:00:12
+1970-01-01 00:00:18
+1970-01-01 00:00:22
+1970-01-01 00:00:32
+1970-01-01 00:00:36
+1970-01-01 00:00:42
1970-01-01 00:00:46
+1970-01-01 00:00:01
+1970-01-01 00:00:05
+1970-01-01 00:00:11
+1970-01-01 00:00:15
+1970-01-01 00:00:25
+1970-01-01 00:00:29
+1970-01-01 00:00:43
1970-01-01 00:00:47
-1970-01-01 00:00:48
1970-01-01 00:00:49
-1970-01-01 00:00:50
-1970-01-01 00:00:51
-1970-01-01 00:00:52
1970-01-01 00:00:53
-1970-01-01 00:00:54
1970-01-01 00:00:55
-1970-01-01 00:00:56
1970-01-01 00:00:57
+1970-01-01 00:00:02
+1970-01-01 00:00:06
+1970-01-01 00:00:16
+1970-01-01 00:00:20
+1970-01-01 00:00:26
+1970-01-01 00:00:30
+1970-01-01 00:00:40
+1970-01-01 00:00:44
+1970-01-01 00:00:50
+1970-01-01 00:00:54
1970-01-01 00:00:58
-1970-01-01 00:00:59
drop table t2;
create table t3 (a datetime not null, primary key(a)) engine='InnoDB'
partition by range (month(a)) subpartition by key (a)
@@ -812,15 +812,15 @@ count(*)
12
select * from t3;
a
-1970-01-01 00:00:00
1970-02-01 00:00:00
+1970-01-01 00:00:00
1970-03-01 00:00:00
1970-04-01 00:00:00
1970-05-01 00:00:00
1970-06-01 00:00:00
-1970-07-01 00:00:00
1970-08-01 00:00:00
1970-09-01 00:00:00
+1970-07-01 00:00:00
1970-10-01 00:00:00
1970-11-01 00:00:00
1970-12-01 00:00:00
@@ -852,15 +852,15 @@ count(*)
12
select * from t4;
a
-1970-01-01 00:00:00
1970-02-01 00:00:00
+1970-01-01 00:00:00
1970-03-01 00:00:00
1970-04-01 00:00:00
1970-05-01 00:00:00
1970-06-01 00:00:00
-1970-07-01 00:00:00
1970-08-01 00:00:00
1970-09-01 00:00:00
+1970-07-01 00:00:00
1970-10-01 00:00:00
1970-11-01 00:00:00
1970-12-01 00:00:00
@@ -885,19 +885,19 @@ t1 CREATE TABLE `t1` (
insert into t1 values ('1975'), (2020), ('1980'), ('2000');
select * from t1;
a
-1975
1980
2000
2020
+1975
select * from t1 where a=1980;
a
1980
delete from t1 where a=1980;
select * from t1;
a
-1975
2000
2020
+1975
drop table t1;
create table t2 (a year not null, primary key(a)) engine='InnoDB'
partition by key (a) partitions 12;
@@ -912,19 +912,19 @@ PARTITIONS 12
insert into t2 values ('1975'), ('2020'), ('1980'), ('2000');
select * from t2;
a
+2020
1975
1980
2000
-2020
select * from t2 where a='1980';
a
1980
delete from t2 where a='1980';
select * from t2;
a
+2020
1975
2000
-2020
delete from t2;
255 inserts;
Warnings:
@@ -934,259 +934,259 @@ count(*)
255
select * from t2;
a
-0000
-1902
-1903
-1904
-1905
-1906
-1907
-1908
1909
-1910
-1911
-1912
-1913
-1914
-1915
-1916
-1917
-1918
-1919
-1920
1921
-1922
-1923
-1924
-1925
-1926
-1927
+1933
+1945
+1957
+1969
+1981
+1993
+2001
+2013
+2029
+2041
+2053
+2065
+2077
+2089
+2100
+2101
+2102
+2103
+2114
+2132
+2133
+2134
+2135
+2146
+1904
+1916
1928
-1929
-1930
-1931
1932
-1933
-1934
-1935
-1936
-1937
-1938
-1939
1940
-1941
-1942
-1943
-1944
-1945
-1946
-1947
-1948
-1949
-1950
-1951
1952
-1953
-1954
-1955
-1956
-1957
-1958
-1959
-1960
-1961
-1962
-1963
1964
-1965
-1966
-1967
-1968
-1969
-1970
-1971
-1972
-1973
-1974
-1975
1976
-1977
-1978
-1979
-1980
-1981
-1982
-1983
-1984
-1985
-1986
-1987
1988
-1989
-1990
+2008
+2020
+2024
+2036
+2048
+2060
+2072
+2084
+1907
+1919
+1931
+1943
+1955
+1967
+1979
1991
-1992
-1993
-1994
-1995
-1996
-1997
-1998
1999
-2000
-2001
-2002
-2003
-2004
-2005
-2006
-2007
-2008
-2009
-2010
2011
-2012
-2013
-2014
-2015
-2016
-2017
+2027
+2039
+2051
+2063
+2075
+2087
+2097
+2099
+2110
+2111
+2112
+2113
+2123
+2129
+2131
+2142
+2143
+2144
+2145
+2147
+1902
+1914
+1926
+1938
+1950
+1962
+1974
+1986
+2006
2018
-2019
-2020
+2034
+2046
+2058
+2070
+2082
+1905
+1917
+1929
+1941
+1953
+1965
+1977
+1989
+1997
+2009
2021
-2022
-2023
-2024
2025
-2026
-2027
-2028
-2029
-2030
-2031
-2032
-2033
-2034
-2035
-2036
2037
-2038
-2039
-2040
-2041
-2042
-2043
-2044
-2045
-2046
-2047
-2048
2049
-2050
-2051
-2052
-2053
-2054
-2055
-2056
-2057
-2058
-2059
-2060
2061
-2062
-2063
-2064
-2065
-2066
-2067
-2068
-2069
-2070
-2071
-2072
2073
-2074
-2075
-2076
-2077
-2078
-2079
-2080
-2081
-2082
-2083
-2084
2085
-2086
-2087
-2088
-2089
-2090
-2091
2092
2093
2094
2095
-2096
-2097
-2098
-2099
-2100
-2101
-2102
-2103
+2120
+2121
+2124
+2125
+2126
+2127
+2152
+2153
+0000
+1912
+1924
+1936
+1948
+1960
+1972
+1984
+1996
+2004
+2016
+2032
+2044
+2056
+2068
+2080
+2116
+2148
+1903
+1915
+1927
+1939
+1951
+1963
+1975
+1987
+2007
+2019
+2023
+2035
+2047
+2059
+2071
+2083
+2155
+1910
+1922
+1934
+1946
+1958
+1970
+1982
+1994
+2002
+2014
+2030
+2042
+2054
+2066
+2078
+2090
+1913
+1925
+1937
+1949
+1961
+1973
+1985
+2005
+2017
+2033
+2045
+2057
+2069
+2081
2104
2105
2106
2107
-2108
-2109
-2110
-2111
-2112
-2113
-2114
-2115
-2116
2117
2118
2119
-2120
-2121
-2122
-2123
-2124
-2125
-2126
-2127
-2128
-2129
-2130
-2131
-2132
-2133
-2134
-2135
2136
2137
2138
2139
-2140
-2141
-2142
-2143
-2144
-2145
-2146
-2147
-2148
2149
2150
2151
-2152
-2153
+1908
+1920
+1944
+1956
+1968
+1980
+1992
+2000
+2012
+2028
+2040
+2052
+2064
+2076
+2088
+2096
+2098
+2108
+2128
+2130
+2140
+1911
+1923
+1935
+1947
+1959
+1971
+1983
+1995
+2003
+2015
+2031
+2043
+2055
+2067
+2079
+2091
+2115
+1906
+1918
+1930
+1942
+1954
+1966
+1978
+1990
+1998
+2010
+2022
+2026
+2038
+2050
+2062
+2074
+2086
+2109
+2122
+2141
2154
-2155
drop table t2;
diff --git a/mysql-test/suite/parts/r/partition_datetime_myisam.result b/mysql-test/suite/parts/r/partition_datetime_myisam.result
index 0d1dcf3ec30..9d8acb09a4a 100644
--- a/mysql-test/suite/parts/r/partition_datetime_myisam.result
+++ b/mysql-test/suite/parts/r/partition_datetime_myisam.result
@@ -509,67 +509,67 @@ SUBPARTITIONS 3
select count(*) from t3;
count(*)
59
-select * from t3;
-a
-10:00:01
-10:00:02
-10:00:03
-10:00:04
-10:00:05
-10:00:06
-10:00:07
-10:00:08
-10:00:09
-10:00:10
-10:00:11
-10:00:12
-10:00:13
-10:00:14
-10:00:15
-10:00:16
-10:00:17
-10:00:18
-10:00:19
-10:00:20
-10:00:21
-10:00:22
-10:00:23
-10:00:24
-10:00:25
-10:00:26
-10:00:27
-10:00:28
-10:00:29
-10:00:30
-10:00:31
-10:00:32
-10:00:33
-10:00:34
-10:00:35
-10:00:36
-10:00:37
-10:00:38
-10:00:39
-10:00:40
-10:00:41
-10:00:42
-10:00:43
-10:00:44
-10:00:45
-10:00:46
-10:00:47
-10:00:48
-10:00:49
-10:00:50
-10:00:51
-10:00:52
-10:00:53
-10:00:54
-10:00:55
-10:00:56
-10:00:57
-10:00:58
-10:00:59
+select a, second(a), if(second(a)<16,1,if(second(a)<31,2,if(second(a)<45,3,4))) from t3;
+a second(a) if(second(a)<16,1,if(second(a)<31,2,if(second(a)<45,3,4)))
+10:00:01 1 1
+10:00:02 2 1
+10:00:03 3 1
+10:00:04 4 1
+10:00:05 5 1
+10:00:06 6 1
+10:00:07 7 1
+10:00:08 8 1
+10:00:09 9 1
+10:00:10 10 1
+10:00:11 11 1
+10:00:12 12 1
+10:00:13 13 1
+10:00:14 14 1
+10:00:15 15 1
+10:00:16 16 2
+10:00:17 17 2
+10:00:18 18 2
+10:00:19 19 2
+10:00:20 20 2
+10:00:21 21 2
+10:00:22 22 2
+10:00:23 23 2
+10:00:24 24 2
+10:00:25 25 2
+10:00:26 26 2
+10:00:27 27 2
+10:00:28 28 2
+10:00:29 29 2
+10:00:30 30 2
+10:00:31 31 3
+10:00:32 32 3
+10:00:33 33 3
+10:00:34 34 3
+10:00:35 35 3
+10:00:36 36 3
+10:00:37 37 3
+10:00:38 38 3
+10:00:39 39 3
+10:00:40 40 3
+10:00:41 41 3
+10:00:42 42 3
+10:00:43 43 3
+10:00:44 44 3
+10:00:45 45 4
+10:00:46 46 4
+10:00:47 47 4
+10:00:48 48 4
+10:00:49 49 4
+10:00:50 50 4
+10:00:51 51 4
+10:00:52 52 4
+10:00:53 53 4
+10:00:54 54 4
+10:00:55 55 4
+10:00:56 56 4
+10:00:57 57 4
+10:00:58 58 4
+10:00:59 59 4
drop table t3;
create table t4 (a time not null, primary key(a)) engine='MyISAM'
partition by list (second(a)) subpartition by key (a)
diff --git a/mysql-test/suite/parts/r/partition_decimal_innodb.result b/mysql-test/suite/parts/r/partition_decimal_innodb.result
index c2f00a8925e..bce034612dc 100644
--- a/mysql-test/suite/parts/r/partition_decimal_innodb.result
+++ b/mysql-test/suite/parts/r/partition_decimal_innodb.result
@@ -18,11 +18,11 @@ t1 CREATE TABLE `t1` (
insert into t1 values (999999.9999), (-999999.9999), (123456.7899), (-123456.7899), (-1.5), (1), (0), (-1), (1.5), (1234.567), (-1234.567);
select * from t1;
a
--999999.9999
--123456.7899
--1234.5670
--1.5000
-1.0000
+-1.5000
+-1234.5670
+-123456.7899
+-999999.9999
0.0000
1.0000
1.5000
@@ -35,11 +35,11 @@ a
delete from t1 where a=1234.567;
select * from t1;
a
--999999.9999
--123456.7899
--1234.5670
--1.5000
-1.0000
+-1.5000
+-1234.5670
+-123456.7899
+-999999.9999
0.0000
1.0000
1.5000
@@ -59,10 +59,10 @@ PARTITIONS 10
insert into t2 values (999999999.999999999), (-999999999.999999999), (-1.5), (-1), (0), (1.5), (1234.567), (-1234.567);
select * from t2;
a
--999999999.999999999
--1234.567000000
--1.500000000
-1.000000000
+-1.500000000
+-1234.567000000
+-999999999.999999999
0.000000000
1.500000000
1234.567000000
@@ -73,10 +73,10 @@ a
delete from t2 where a=1234.567;
select * from t2;
a
--999999999.999999999
--1234.567000000
--1.500000000
-1.000000000
+-1.500000000
+-1234.567000000
+-999999999.999999999
0.000000000
1.500000000
999999999.999999999
diff --git a/mysql-test/suite/parts/r/partition_decimal_myisam.result b/mysql-test/suite/parts/r/partition_decimal_myisam.result
index a5175079a4b..90ea5ea83ef 100644
--- a/mysql-test/suite/parts/r/partition_decimal_myisam.result
+++ b/mysql-test/suite/parts/r/partition_decimal_myisam.result
@@ -18,11 +18,11 @@ t1 CREATE TABLE `t1` (
insert into t1 values (999999.9999), (-999999.9999), (123456.7899), (-123456.7899), (-1.5), (1), (0), (-1), (1.5), (1234.567), (-1234.567);
select * from t1;
a
--999999.9999
--123456.7899
--1234.5670
--1.5000
-1.0000
+-1.5000
+-1234.5670
+-123456.7899
+-999999.9999
0.0000
1.0000
1.5000
@@ -35,11 +35,11 @@ a
delete from t1 where a=1234.567;
select * from t1;
a
--999999.9999
--123456.7899
--1234.5670
--1.5000
-1.0000
+-1.5000
+-1234.5670
+-123456.7899
+-999999.9999
0.0000
1.0000
1.5000
@@ -59,10 +59,10 @@ PARTITIONS 10
insert into t2 values (999999999.999999999), (-999999999.999999999), (-1.5), (-1), (0), (1.5), (1234.567), (-1234.567);
select * from t2;
a
--999999999.999999999
--1234.567000000
--1.500000000
-1.000000000
+-1.500000000
+-1234.567000000
+-999999999.999999999
0.000000000
1.500000000
1234.567000000
@@ -73,10 +73,10 @@ a
delete from t2 where a=1234.567;
select * from t2;
a
--999999999.999999999
--1234.567000000
--1.500000000
-1.000000000
+-1.500000000
+-1234.567000000
+-999999999.999999999
0.000000000
1.500000000
999999999.999999999
diff --git a/mysql-test/suite/parts/r/partition_double_innodb.result b/mysql-test/suite/parts/r/partition_double_innodb.result
index 7563109f30b..41834075790 100644
--- a/mysql-test/suite/parts/r/partition_double_innodb.result
+++ b/mysql-test/suite/parts/r/partition_double_innodb.result
@@ -18,10 +18,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values (-2.2250738585072014E+208), (-2.2250738585072014E-208), (-1.5), (-1), (0), (1.5), (1234.567), (2.2250738585072014E+208);
select * from t1;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1.5
1234.567
@@ -32,10 +32,10 @@ a
delete from t1 where a=1.5;
select * from t1;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1234.567
2.2250738585072016e208
@@ -53,10 +53,10 @@ PARTITIONS 10
insert into t2 values (-2.2250738585072014E+208), (-2.2250738585072014E-208), (-1.5), (-1), (0), (1.5), (1234.567), (2.2250738585072014E+208);
select * from t2;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1.5
1234.567
@@ -67,10 +67,10 @@ a
delete from t2 where a=1234.567;
select * from t2;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1.5
2.2250738585072016e208
diff --git a/mysql-test/suite/parts/r/partition_double_myisam.result b/mysql-test/suite/parts/r/partition_double_myisam.result
index e9cf25e6408..f2161d42918 100644
--- a/mysql-test/suite/parts/r/partition_double_myisam.result
+++ b/mysql-test/suite/parts/r/partition_double_myisam.result
@@ -18,10 +18,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values (-2.2250738585072014E+208), (-2.2250738585072014E-208), (-1.5), (-1), (0), (1.5), (1234.567), (2.2250738585072014E+208);
select * from t1;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1.5
1234.567
@@ -32,10 +32,10 @@ a
delete from t1 where a=1.5;
select * from t1;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1234.567
2.2250738585072016e208
@@ -53,10 +53,10 @@ PARTITIONS 10
insert into t2 values (-2.2250738585072014E+208), (-2.2250738585072014E-208), (-1.5), (-1), (0), (1.5), (1234.567), (2.2250738585072014E+208);
select * from t2;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1.5
1234.567
@@ -67,10 +67,10 @@ a
delete from t2 where a=1234.567;
select * from t2;
a
--2.2250738585072016e208
--1.5
-1
+-1.5
-2.2250738585072014e-208
+-2.2250738585072016e208
0
1.5
2.2250738585072016e208
diff --git a/mysql-test/suite/parts/r/partition_float_innodb.result b/mysql-test/suite/parts/r/partition_float_innodb.result
index 7cdccdb886f..c82609b496c 100644
--- a/mysql-test/suite/parts/r/partition_float_innodb.result
+++ b/mysql-test/suite/parts/r/partition_float_innodb.result
@@ -18,10 +18,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values (-3.402823466E+38), (3.402823466E+38), (-1.5), (-1), (0), (1), (1.5);
select * from t1;
a
+0
-3.40282e38
-1.5
-1
-0
1
1.5
3.40282e38
@@ -31,10 +31,10 @@ a
delete from t1 where a=1.5;
select * from t1;
a
+0
-3.40282e38
-1.5
-1
-0
1
3.40282e38
drop table t1;
@@ -51,10 +51,10 @@ PARTITIONS 10
insert into t2 values (-3.402823466E+38), (-3.402823466E+37), (-123.456), (0), (1234546.789), (123.456), (1.5);
select * from t2;
a
+0
-3.40282e38
-3.40282e37
-123.456
-0
1.5
123.456
1234550
@@ -63,10 +63,10 @@ a
delete from t2 where a=123.456;
select * from t2;
a
+0
-3.40282e38
-3.40282e37
-123.456
-0
1.5
123.456
1234550
@@ -76,10 +76,10 @@ a
delete from t2 where a=1.5;
select * from t2;
a
+0
-3.40282e38
-3.40282e37
-123.456
-0
123.456
1234550
delete from t2;
diff --git a/mysql-test/suite/parts/r/partition_special_innodb.result b/mysql-test/suite/parts/r/partition_special_innodb.result
index 27f8f0a9d5c..2f056de2b7a 100644
--- a/mysql-test/suite/parts/r/partition_special_innodb.result
+++ b/mysql-test/suite/parts/r/partition_special_innodb.result
@@ -67,9 +67,9 @@ insert into t1 values
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m' );
select * from t1;
a b c d e f g h i
+1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 pib mdotkbm.m
select * from t1 where a<19851231;
a b c d e f g h i
@@ -117,9 +117,9 @@ insert into t1 values
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m');
select * from t1;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 i
-1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
-1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
+1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
+1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 pib mdotkbm.m
select * from t1 where a<19851231;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 i
@@ -197,9 +197,9 @@ insert into t1 values
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m');
select * from t1;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 a2 b2 c2 d2 e2 f2 g2 h2 a3 b3 c3 d3 e3 f3 g3 h3 i
+1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 pib mdotkbm.m
select * from t1 where a<19851231;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 a2 b2 c2 d2 e2 f2 g2 h2 a3 b3 c3 d3 e3 f3 g3 h3 i
diff --git a/mysql-test/suite/parts/r/partition_special_myisam.result b/mysql-test/suite/parts/r/partition_special_myisam.result
index f6ceef4fc77..ce30977cfb7 100644
--- a/mysql-test/suite/parts/r/partition_special_myisam.result
+++ b/mysql-test/suite/parts/r/partition_special_myisam.result
@@ -67,9 +67,9 @@ insert into t1 values
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m' );
select * from t1;
a b c d e f g h i
+1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 pib mdotkbm.m
select * from t1 where a<19851231;
a b c d e f g h i
@@ -117,9 +117,9 @@ insert into t1 values
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m');
select * from t1;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 i
-1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
-1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
+1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
+1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 pib mdotkbm.m
select * from t1 where a<19851231;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 i
@@ -197,9 +197,9 @@ insert into t1 values
('2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, '2000-06-15', 'jukg','zikhuk','m', 45675, 6465754.13, 435242623462, 18, 'pib mdotkbm.m');
select * from t1;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 a2 b2 c2 d2 e2 f2 g2 h2 a3 b3 c3 d3 e3 f3 g3 h3 i
+1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 1980-10-14 fgbbd dtzndtz w 67856 5463354.67 3567845333 124 d,f söierugsig msireg siug ei5ggth lrutluitgzeöjrtnb.rkjthuekuhzrkuthgjdnffjmbr
1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 1983-12-31 cdef srtbvsr w 45634 13452.56 3452346456 127 liuugbzvdmrlti b itiortudirtfgtibm dfi
-1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 1975-01-01 abcde abcde m 1234 123.45 32412341234 113 tbhth nrzh ztfghgfh fzh ftzhj fztjh
2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 2000-06-15 jukg zikhuk m 45675 6465754.13 435242623462 18 pib mdotkbm.m
select * from t1 where a<19851231;
a b c d e f g h a1 b1 c1 d1 e1 f1 g1 h1 a2 b2 c2 d2 e2 f2 g2 h2 a3 b3 c3 d3 e3 f3 g3 h3 i
diff --git a/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result b/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result
index 070d5e8d79f..18afb41f31b 100644
--- a/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result
+++ b/mysql-test/suite/parts/r/percona_nonflushing_analyze_debug.result
@@ -4,7 +4,7 @@ PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t1 VALUES (1), (2), (3), (4);
connect con1,localhost,root;
-SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SET DEBUG_SYNC="handler_rnd_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
@@ -39,7 +39,7 @@ PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t2 VALUES (1), (2), (3), (4);
connect con1,localhost,root;
-SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
+SET DEBUG_SYNC="handler_rnd_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t2;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
@@ -51,17 +51,17 @@ test.t2 analyze status OK
set use_stat_tables=@tmp;
SELECT * FROM t2;
a
-1
2
-3
+1
4
+3
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
-1
2
-3
+1
4
+3
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
diff --git a/mysql-test/suite/parts/r/reorganize_partition_innodb.result b/mysql-test/suite/parts/r/reorganize_partition_innodb.result
index b56cb6bbaeb..b2233781219 100644
--- a/mysql-test/suite/parts/r/reorganize_partition_innodb.result
+++ b/mysql-test/suite/parts/r/reorganize_partition_innodb.result
@@ -40,6 +40,8 @@ t CREATE TABLE `t` (
PARTITION `p2` DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_here' ENGINE = InnoDB)
SET @TMP = @@GLOBAL.INNODB_FILE_PER_TABLE;
SET GLOBAL INNODB_FILE_PER_TABLE=OFF;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t ADD PRIMARY KEY pk(a), ALGORITHM=INPLACE;
Warnings:
Warning 1280 Name 'pk' ignored for PRIMARY key.
@@ -53,6 +55,8 @@ t CREATE TABLE `t` (
(PARTITION `p1` DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_here' ENGINE = InnoDB,
PARTITION `p2` DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_here' ENGINE = InnoDB)
SET GLOBAL INNODB_FILE_PER_TABLE=@TMP;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
ALTER TABLE t REORGANIZE PARTITION p1,p2 INTO (
PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_somewhere_else/' ENGINE = INNODB,
PARTITION p2 DATA DIRECTORY = 'MYSQLTEST_VARDIR/tmp/partitions_somewhere_else/' ENGINE = INNODB
diff --git a/mysql-test/suite/parts/t/debug_innodb_crash-master.opt b/mysql-test/suite/parts/t/debug_innodb_crash-master.opt
index b7f94e14e12..d8813652cd6 100644
--- a/mysql-test/suite/parts/t/debug_innodb_crash-master.opt
+++ b/mysql-test/suite/parts/t/debug_innodb_crash-master.opt
@@ -1 +1 @@
---loose-innodb-file-per-table=1 --loose-skip-stack-trace --skip-core-file --loose-innodb-buffer-pool-size=32M
+--loose-skip-stack-trace --skip-core-file --loose-innodb-buffer-pool-size=32M
diff --git a/mysql-test/suite/parts/t/partition_debug_sync_innodb-master.opt b/mysql-test/suite/parts/t/partition_debug_sync_innodb-master.opt
deleted file mode 100644
index 115a0ba2cc8..00000000000
--- a/mysql-test/suite/parts/t/partition_debug_sync_innodb-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---loose-innodb_file_per_table
diff --git a/mysql-test/suite/perfschema/include/upgrade_check.inc b/mysql-test/suite/perfschema/include/upgrade_check.inc
index c16e90c78c2..30e33ebb5f1 100644
--- a/mysql-test/suite/perfschema/include/upgrade_check.inc
+++ b/mysql-test/suite/perfschema/include/upgrade_check.inc
@@ -12,4 +12,4 @@
--cat_file $MYSQLTEST_VARDIR/tmp/err_file
--remove_file $MYSQLTEST_VARDIR/tmp/out_file
--remove_file $MYSQLTEST_VARDIR/tmp/err_file
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
diff --git a/mysql-test/suite/perfschema/r/alter_table_progress.result b/mysql-test/suite/perfschema/r/alter_table_progress.result
index 08c2c3a6145..31cc60927f6 100644
--- a/mysql-test/suite/perfschema/r/alter_table_progress.result
+++ b/mysql-test/suite/perfschema/r/alter_table_progress.result
@@ -16,8 +16,6 @@ connection default;
SET DEBUG_SYNC='now WAIT_FOR found_row';
select event_id from performance_schema.events_statements_current
where thread_id = @con1_thread_id into @con1_stmt_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select EVENT_NAME, WORK_COMPLETED, WORK_ESTIMATED
from performance_schema.events_stages_current
where (thread_id = @con1_thread_id);
diff --git a/mysql-test/suite/perfschema/r/batch_table_io_func.result b/mysql-test/suite/perfschema/r/batch_table_io_func.result
index 212c08c7e90..4336ebd3bd6 100644
--- a/mysql-test/suite/perfschema/r/batch_table_io_func.result
+++ b/mysql-test/suite/perfschema/r/batch_table_io_func.result
@@ -154,8 +154,8 @@ alter table t3 add index(id2);
explain extended select t1.*, t2.*, t3.*
from t1 join t2 using (id1) join t3 using (id2);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL id1 NULL NULL NULL 10 100.00 Using where
-1 SIMPLE t2 ref id2,id1 id1 5 test.t1.id1 10 100.00 Using where
+1 SIMPLE t1 ALL id1 NULL NULL NULL 10 100.00
+1 SIMPLE t2 ALL id2,id1 NULL NULL NULL 100 10.00 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t3 ref id2 id2 5 test.t2.id2 10 100.00
Warnings:
Note 1003 select `test`.`t1`.`id1` AS `id1`,`test`.`t1`.`a` AS `a`,`test`.`t2`.`id1` AS `id1`,`test`.`t2`.`id2` AS `id2`,`test`.`t2`.`b` AS `b`,`test`.`t3`.`id2` AS `id2`,`test`.`t3`.`id3` AS `id3`,`test`.`t3`.`c` AS `c` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t3`.`id2` = `test`.`t2`.`id2` and `test`.`t2`.`id1` = `test`.`t1`.`id1`
@@ -167,7 +167,7 @@ number_seen OBJECT_TYPE OBJECT_SCHEMA OBJECT_NAME INDEX_NAME OPERATION NUMBER_OF
11 TABLE test t1 NULL fetch 1
1 TABLE test t1 id1 read external NULL
1 TABLE test t1 id1 read normal NULL
-110 TABLE test t2 id1 fetch 1
+101 TABLE test t2 NULL fetch 1
1 TABLE test t2 id2 read external NULL
1 TABLE test t2 id2 read normal NULL
100 TABLE test t3 id2 fetch 10
@@ -177,14 +177,15 @@ OBJECT_TYPE OBJECT_SCHEMA OBJECT_NAME INDEX_NAME COUNT_STAR COUNT_READ COUNT_WRI
TABLE test t0 NULL 0 0 0
TABLE test t1 NULL 11 11 0
TABLE test t1 id1 0 0 0
-TABLE test t2 id1 110 110 0
+TABLE test t2 NULL 101 101 0
+TABLE test t2 id1 0 0 0
TABLE test t2 id2 0 0 0
TABLE test t3 id2 1000 1000 0
TABLE test t3 id3 0 0 0
OBJECT_TYPE OBJECT_SCHEMA OBJECT_NAME COUNT_STAR COUNT_READ COUNT_WRITE
TABLE test t0 0 0 0
TABLE test t1 11 11 0
-TABLE test t2 110 110 0
+TABLE test t2 101 101 0
TABLE test t3 1000 1000 0
drop table t0;
drop table t1;
diff --git a/mysql-test/suite/perfschema/r/dml_handler.result b/mysql-test/suite/perfschema/r/dml_handler.result
index 61bbba3189e..1510adc7148 100644
--- a/mysql-test/suite/perfschema/r/dml_handler.result
+++ b/mysql-test/suite/perfschema/r/dml_handler.result
@@ -6,8 +6,6 @@ SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA='performance_schema'
ORDER BY TABLE_NAME;
SELECT COUNT(*) FROM table_list INTO @table_count;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
#
# For each table in the performance schema, attempt HANDLER...OPEN,
# which should fail with an error 1031, ER_ILLEGAL_HA.
diff --git a/mysql-test/suite/perfschema/r/ortho_iter.result b/mysql-test/suite/perfschema/r/ortho_iter.result
index 9489c1049e5..388fac4b668 100644
--- a/mysql-test/suite/perfschema/r/ortho_iter.result
+++ b/mysql-test/suite/perfschema/r/ortho_iter.result
@@ -218,10 +218,6 @@ close pfs_cursor;
signal sqlstate '01000' set message_text='Done', mysql_errno=12000;
end
$
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show global variables like "performance_schema%";
Variable_name Value
performance_schema ON
diff --git a/mysql-test/suite/perfschema/r/rpl_threads.result b/mysql-test/suite/perfschema/r/rpl_threads.result
index c756b4d9046..e9ad54386ab 100644
--- a/mysql-test/suite/perfschema/r/rpl_threads.result
+++ b/mysql-test/suite/perfschema/r/rpl_threads.result
@@ -16,8 +16,6 @@ connection master;
select ID from INFORMATION_SCHEMA.PROCESSLIST
where COMMAND = "Binlog Dump"
into @master_dump_pid;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select COMMAND, STATE
from INFORMATION_SCHEMA.PROCESSLIST
where ID = @master_dump_pid;
@@ -33,8 +31,6 @@ connection slave;
select ID from INFORMATION_SCHEMA.PROCESSLIST
where STATE like "Waiting for master to send event%"
into @slave_io_pid;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select COMMAND, STATE
from INFORMATION_SCHEMA.PROCESSLIST
where ID = @slave_io_pid;
@@ -47,8 +43,6 @@ NAME TYPE PROCESSLIST_COMMAND PROCESSLIST_STATE
select ID from INFORMATION_SCHEMA.PROCESSLIST
where STATE like "Slave has read all relay log%"
into @slave_sql_pid;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select COMMAND, STATE
from INFORMATION_SCHEMA.PROCESSLIST
where ID = @slave_sql_pid;
diff --git a/mysql-test/suite/perfschema/r/selects.result b/mysql-test/suite/perfschema/r/selects.result
index d623d45a6e8..c14d152856f 100644
--- a/mysql-test/suite/perfschema/r/selects.result
+++ b/mysql-test/suite/perfschema/r/selects.result
@@ -93,8 +93,6 @@ SELECT thread_id FROM performance_schema.threads
WHERE PROCESSLIST_ID = conid INTO pid;
END;
|
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL t_ps_proc(connection_id(), @p_id);
DROP FUNCTION IF EXISTS t_ps_proc;
CREATE FUNCTION t_ps_func(conid INT) RETURNS int
diff --git a/mysql-test/suite/perfschema/t/show_sanity.test b/mysql-test/suite/perfschema/t/show_sanity.test
index 3ca88b556f8..35dd8afdbfb 100644
--- a/mysql-test/suite/perfschema/t/show_sanity.test
+++ b/mysql-test/suite/perfschema/t/show_sanity.test
@@ -400,9 +400,6 @@ insert into test.sanity values
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_BUFFER_POOL_LOAD_NOW"),
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_BUFFER_POOL_SIZE"),
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_BUF_FLUSH_LIST_NOW"),
- ("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_CHANGE_BUFFERING"),
- ("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_CHANGE_BUFFERING_DEBUG"),
- ("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_CHANGE_BUFFER_MAX_SIZE"),
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_CHECKSUM_ALGORITHM"),
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_CMP_PER_INDEX_ENABLED"),
("JUNK: GLOBAL-ONLY", "I_S.SESSION_VARIABLES", "INNODB_COMPRESS_DEBUG"),
diff --git a/mysql-test/suite/period/r/delete,myisam.rdiff b/mysql-test/suite/period/r/delete,myisam.rdiff
index 78fb972b0bc..179f399ac9a 100644
--- a/mysql-test/suite/period/r/delete,myisam.rdiff
+++ b/mysql-test/suite/period/r/delete,myisam.rdiff
@@ -1,5 +1,5 @@
---- suite/period/r/delete.result 2019-02-16 11:14:23.511258191 +0100
-+++ suite/period/r/delete.reject 2019-02-16 11:14:32.869258690 +0100
+--- suite/period/r/delete.result
++++ suite/period/r/delete.reject
@@ -250,7 +250,6 @@
ERROR 22003: Out of range value for column 'id' at row 1
select * from t;
diff --git a/mysql-test/suite/roles/admin.result b/mysql-test/suite/roles/admin.result
index 2ecbfae4516..4e8fa9652a0 100644
--- a/mysql-test/suite/roles/admin.result
+++ b/mysql-test/suite/roles/admin.result
@@ -8,9 +8,9 @@ create role role3 with admin role1;
create role role4 with admin root@localhost;
connect c1, localhost, foo,,;
create role role5 with admin root@localhost;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
create role role5 with admin role3;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
create role role5 with admin foo@localhost;
connection default;
call mtr.add_suppression("Invalid roles_mapping table entry user:'foo@bar', rolename:'role6'");
diff --git a/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test b/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test
index dac6eab21e9..e397989bec5 100644
--- a/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test
+++ b/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test
@@ -17,7 +17,7 @@ alter table user drop column max_statement_time;
flush privileges;
---replace_regex /10\d\d\d\d/MYSQL_VERSION_ID/
+--replace_regex /11\d\d\d\d/MYSQL_VERSION_ID/
--error ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
create role test_role;
--error ER_CANNOT_USER
@@ -30,8 +30,6 @@ after password_expired;
create role test_role;
create user test_user@localhost;
grant test_role to test_user@localhost;
-#--replace_regex /10\d\d\d\d/MYSQL_VERSION_ID/
-#--error ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
set default role test_role for root@localhost;
drop role test_role;
drop user test_user@localhost;
diff --git a/mysql-test/suite/roles/definer.result b/mysql-test/suite/roles/definer.result
index 091ba255bc6..f7cc9684ce5 100644
--- a/mysql-test/suite/roles/definer.result
+++ b/mysql-test/suite/roles/definer.result
@@ -669,7 +669,7 @@ CREATE DEFINER='r1' PROCEDURE user1_proc2() SQL SECURITY INVOKER
BEGIN
SELECT NOW(), VERSION();
END;//
-ERROR 42000: Access denied; you need (at least one of) the SUPER, SET USER privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the SET USER privilege(s) for this operation
set role r1;
CREATE DEFINER='r1' PROCEDURE user1_proc2() SQL SECURITY INVOKER
BEGIN
diff --git a/mysql-test/suite/rpl/r/rpl_cross_version.result b/mysql-test/suite/rpl/r/rpl_cross_version.result
deleted file mode 100644
index 1b67542c106..00000000000
--- a/mysql-test/suite/rpl/r/rpl_cross_version.result
+++ /dev/null
@@ -1,22 +0,0 @@
-include/master-slave.inc
-[connection master]
-==== Initialize ====
-connection slave;
-include/stop_slave.inc
-RESET SLAVE;
-include/setup_fake_relay_log.inc
-Setting up fake replication from MYSQL_TEST_DIR/suite/binlog/std_data/binlog_old_version_4_1.000001
-==== Test ====
-start slave sql_thread;
-include/wait_for_slave_param.inc [Exec_Master_Log_Pos]
-==== a prove that the fake has been processed successfully ====
-SELECT COUNT(*) - 17920 as zero FROM t3;
-zero
-0
-==== Clean up ====
-include/stop_slave_sql.inc
-include/cleanup_fake_relay_log.inc
-Warnings:
-Note 4190 RESET SLAVE is implicitly changing the value of 'Using_Gtid' from 'No' to 'Slave_Pos'
-drop table t1, t3;
-include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.rdiff b/mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.rdiff
index aaadbb28ca3..d803f8be5c2 100644
--- a/mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.rdiff
+++ b/mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.rdiff
@@ -1,5 +1,5 @@
---- mysql-test/suite/rpl/r/rpl_delayed_slave.result 2016-10-14 21:14:02.338075590 +0200
-+++ mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.reject 2016-10-14 21:17:51.296986686 +0200
+--- mysql-test/suite/rpl/r/rpl_delayed_slave.result
++++ mysql-test/suite/rpl/r/rpl_delayed_slave,parallel.reject
@@ -45,7 +45,6 @@
# wait for first query to execute
# sleep 1*T
diff --git a/mysql-test/suite/rpl/r/rpl_drop_db.result b/mysql-test/suite/rpl/r/rpl_drop_db.result
index 3712527afe4..1b132c20afc 100644
--- a/mysql-test/suite/rpl/r/rpl_drop_db.result
+++ b/mysql-test/suite/rpl/r/rpl_drop_db.result
@@ -6,8 +6,6 @@ create database mysqltest1;
create table mysqltest1.t1 (n int);
insert into mysqltest1.t1 values (1);
select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create table mysqltest1.t2 (n int);
create table mysqltest1.t3 (n int);
drop database mysqltest1;
diff --git a/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result b/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result
index 2ecc3ac159c..063e568f5fa 100644
--- a/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result
+++ b/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result
@@ -51,10 +51,6 @@ DELETE FROM test.regular_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
-Warnings:
-Level Warning
-Code 1287
-Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE test.proc_bykey()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -76,10 +72,6 @@ DELETE FROM test.bykey_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
-Warnings:
-Level Warning
-Code 1287
-Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE test.proc_byrange()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -101,10 +93,6 @@ DELETE FROM test.byrange_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
-Warnings:
-Level Warning
-Code 1287
-Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
begin;
CALL test.proc_norm();
commit;
diff --git a/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result b/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result
index da6888e76a0..d4640a36a7d 100644
--- a/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result
+++ b/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result
@@ -26,10 +26,6 @@ DELETE FROM test.regular_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
-Warnings:
-Level Warning
-Code 1287
-Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL test.proc_norm();
connection slave;
connection master;
diff --git a/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff b/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff
index 1946228f401..d1a101e51ca 100644
--- a/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff
+++ b/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff
@@ -1,5 +1,5 @@
---- suite/rpl/r/rpl_insert_delayed.result 2016-03-25 19:44:43.408210896 +0400
-+++ suite/rpl/r/rpl_insert_delayed,stmt.reject 2016-03-25 23:55:18.396360848 +0400
+--- suite/rpl/r/rpl_insert_delayed.result
++++ suite/rpl/r/rpl_insert_delayed,stmt.reject
@@ -18,19 +18,19 @@
insert delayed into t1 values(10, "my name");
flush table t1;
diff --git a/mysql-test/suite/rpl/r/rpl_iodku,stmt.rdiff b/mysql-test/suite/rpl/r/rpl_iodku,stmt.rdiff
index e31f1e5d991..2986a47c9ae 100644
--- a/mysql-test/suite/rpl/r/rpl_iodku,stmt.rdiff
+++ b/mysql-test/suite/rpl/r/rpl_iodku,stmt.rdiff
@@ -1,5 +1,5 @@
---- r/rpl_iodku.result 2022-05-04 18:51:24.956414404 +0300
-+++ r/rpl_iodku,stmt.reject 2022-05-04 18:51:49.520106231 +0300
+--- r/rpl_iodku.result
++++ r/rpl_iodku,stmt.reject
@@ -1,10 +1,15 @@
include/master-slave.inc
[connection master]
diff --git a/mysql-test/suite/rpl/r/rpl_mdev12179.result b/mysql-test/suite/rpl/r/rpl_mdev12179.result
index dcda036cdfb..7cb750dd71c 100644
--- a/mysql-test/suite/rpl/r/rpl_mdev12179.result
+++ b/mysql-test/suite/rpl/r/rpl_mdev12179.result
@@ -259,8 +259,6 @@ connection server_2;
*** Restart the slave server to prove 'gtid_slave_pos_innodb' autodiscovery ***
connection server_2;
SELECT max(seq_no) FROM mysql.gtid_slave_pos_InnoDB into @seq_no;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connection server_1;
INSERT INTO t2(a) SELECT 1+MAX(a) FROM t2;
include/save_master_gtid.inc
diff --git a/mysql-test/suite/rpl/r/rpl_misc_functions.result b/mysql-test/suite/rpl/r/rpl_misc_functions.result
index 302cf2351c2..6c20623d62b 100644
--- a/mysql-test/suite/rpl/r/rpl_misc_functions.result
+++ b/mysql-test/suite/rpl/r/rpl_misc_functions.result
@@ -42,8 +42,6 @@ INSERT INTO t1 (col_a) VALUES (test_replication_sf());
INSERT INTO t1 (col_a) VALUES (test_replication_sf());
connection slave;
select * from t1 into outfile "../../tmp/t1_slave.txt";
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connection master;
create temporary table t1_slave select * from t1 where 1=0;
load data infile '../../tmp/t1_slave.txt' into table t1_slave;
diff --git a/mysql-test/suite/rpl/r/rpl_old_master_29078.result b/mysql-test/suite/rpl/r/rpl_old_master_29078.result
index bc7f188a48e..43d998048a0 100644
--- a/mysql-test/suite/rpl/r/rpl_old_master_29078.result
+++ b/mysql-test/suite/rpl/r/rpl_old_master_29078.result
@@ -48,6 +48,8 @@ ROLLBACK /* added by mysqlbinlog */;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
connection slave;
set global explicit_defaults_for_timestamp=0;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
include/reset_slave.inc
include/start_slave.inc
show create table t1;
diff --git a/mysql-test/suite/rpl/r/rpl_parallel_29322.result b/mysql-test/suite/rpl/r/rpl_parallel_29322.result
index 752fb321225..2c0cb144027 100644
--- a/mysql-test/suite/rpl/r/rpl_parallel_29322.result
+++ b/mysql-test/suite/rpl/r/rpl_parallel_29322.result
@@ -10,6 +10,8 @@ set @@session.explicit_defaults_for_timestamp = 1;
connection slave;
set @sav.explicit_defaults_for_timestamp = @@global.explicit_defaults_for_timestamp;
set global explicit_defaults_for_timestamp = 0;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
set @sav.slave_parallel_workers = @@global.slave_parallel_workers;
include/stop_slave.inc
set @@global.slave_parallel_workers = 1;
@@ -105,6 +107,8 @@ connection slave;
# B. alternate the master and slave vars' values to (0,1)
connection master;
set @@session.explicit_defaults_for_timestamp = 0;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
connection slave;
set @@global.explicit_defaults_for_timestamp = 1;
connection slave;
diff --git a/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff b/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff
index 3815ec9375d..1154f92c39e 100644
--- a/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff
+++ b/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff
@@ -1,5 +1,5 @@
---- /home/my/maria-test/mysql-test/suite/rpl/r/rpl_row_big_table_id.result 2019-08-18 15:19:56.829962449 +0300
-+++ /home/my/maria-test/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.reject 2019-08-18 15:20:19.253763968 +0300
+--- /home/my/maria-test/mysql-test/suite/rpl/r/rpl_row_big_table_id.result
++++ /home/my/maria-test/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.reject
@@ -20,22 +20,22 @@
master-bin.000001 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000001 # Gtid 1 # BEGIN GTID #-#-#
diff --git a/mysql-test/suite/rpl/r/rpl_skip_replication.result b/mysql-test/suite/rpl/r/rpl_skip_replication.result
index 96e0a30331d..c17ffbb5e47 100644
--- a/mysql-test/suite/rpl/r/rpl_skip_replication.result
+++ b/mysql-test/suite/rpl/r/rpl_skip_replication.result
@@ -12,7 +12,7 @@ SELECT,UPDATE ON *.* TO 'nonsuperuser'@'127.0.0.1';
connect nonpriv, 127.0.0.1, nonsuperuser,, test, $SLAVE_MYPORT,;
connection nonpriv;
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_MASTER;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
disconnect nonpriv;
connection slave;
DROP USER'nonsuperuser'@'127.0.0.1';
diff --git a/mysql-test/suite/rpl/r/rpl_temporary.result b/mysql-test/suite/rpl/r/rpl_temporary.result
index 492e9ac3ac3..3651ead16cc 100644
--- a/mysql-test/suite/rpl/r/rpl_temporary.result
+++ b/mysql-test/suite/rpl/r/rpl_temporary.result
@@ -42,12 +42,12 @@ connect con3,localhost,zedjzlcsjhd,,;
connection con3;
SET @save_select_limit=@@session.sql_select_limit;
SET @@session.sql_select_limit=10, @@session.pseudo_thread_id=100;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
SELECT @@session.sql_select_limit = @save_select_limit;
@@session.sql_select_limit = @save_select_limit
1
SET @@session.sql_select_limit=10, @@session.sql_log_bin=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SELECT @@session.sql_select_limit = @save_select_limit;
@@session.sql_select_limit = @save_select_limit
1
diff --git a/mysql-test/suite/rpl/r/rpl_timestamp.result b/mysql-test/suite/rpl/r/rpl_timestamp.result
index 31ffd1ed371..ff370d61d9f 100644
--- a/mysql-test/suite/rpl/r/rpl_timestamp.result
+++ b/mysql-test/suite/rpl/r/rpl_timestamp.result
@@ -2,6 +2,8 @@ include/master-slave.inc
[connection master]
set timestamp=1656940000;
set explicit_defaults_for_timestamp=!@@explicit_defaults_for_timestamp;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
connection master;
create table t1 (f1 timestamp, f2 timestamp);
show create table t1;
diff --git a/mysql-test/suite/rpl/t/rpl_cross_version-master.opt b/mysql-test/suite/rpl/t/rpl_cross_version-master.opt
deleted file mode 100644
index 815a8f81d32..00000000000
--- a/mysql-test/suite/rpl/t/rpl_cross_version-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---replicate-same-server-id --relay-log=slave-relay-bin
diff --git a/mysql-test/suite/rpl/t/rpl_cross_version.test b/mysql-test/suite/rpl/t/rpl_cross_version.test
deleted file mode 100644
index 94c9f0432ce..00000000000
--- a/mysql-test/suite/rpl/t/rpl_cross_version.test
+++ /dev/null
@@ -1,48 +0,0 @@
-# ==== Purpose ====
-#
-# Verify cross-version replication from an old master to the up-to-date slave
-#
-# ==== Implementation ====
-#
-# Feed to the slave server a binlog recorded on an old version master
-# via setting up slave-to-slave replication. The latter is done by means of
-# the opt file and include/setup_fake_relay_log.inc.
-# The master's binlog is treated as a relay log that the SQL thread executes.
-#
-
---source include/master-slave.inc
-
-#
-# Bug#31240 load data infile replication between (4.0 or 4.1) and 5.1 fails
-#
-
---echo ==== Initialize ====
---connection slave
-
---disable_query_log
-# The binlog contains the function RAND which is unsafe.
-CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
---source include/stop_slave.inc
-RESET SLAVE;
-
-# the relay log contains create t1, t3 tables and load data infile
---let $fake_relay_log = $MYSQL_TEST_DIR/suite/binlog/std_data/binlog_old_version_4_1.000001
---source include/setup_fake_relay_log.inc
-
---echo ==== Test ====
-start slave sql_thread;
---let $slave_param = Exec_Master_Log_Pos
-# end_log_pos of the last event of the relay log
---let $slave_param_value = 149436
---source include/wait_for_slave_param.inc
---echo ==== a prove that the fake has been processed successfully ====
-SELECT COUNT(*) - 17920 as zero FROM t3;
-
---echo ==== Clean up ====
---source include/stop_slave_sql.inc
---source include/cleanup_fake_relay_log.inc
-drop table t1, t3;
---let $rpl_only_running_threads= 1
---source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test b/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test
index f44c883ef4e..dab2e3f633a 100644
--- a/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test
+++ b/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test
@@ -55,7 +55,7 @@ connection master;
--exec $MYSQL_UPGRADE --skip-verbose --write-binlog --force --user=root > $MYSQLTEST_VARDIR/log/mysql_upgrade.log 2>&1
let $datadir= `select @@datadir`;
-remove_file $datadir/mysql_upgrade_info;
+remove_file $datadir/mariadb_upgrade_info;
connection master;
let $after_file= query_get_value(SHOW MASTER STATUS, File, 1);
diff --git a/mysql-test/suite/sys_vars/inc/explicit_defaults_for_timestamp.inc b/mysql-test/suite/sys_vars/inc/explicit_defaults_for_timestamp.inc
index 0cd8aa2c568..46acf4bac8a 100644
--- a/mysql-test/suite/sys_vars/inc/explicit_defaults_for_timestamp.inc
+++ b/mysql-test/suite/sys_vars/inc/explicit_defaults_for_timestamp.inc
@@ -105,7 +105,9 @@ SET timestamp=DEFAULT;
--echo #
--echo # MDEV-29075 Changing explicit_defaults_for_timestamp within stored procedure works inconsistently
--echo #
+--disable_warnings
set statement explicit_defaults_for_timestamp=1-@@explicit_defaults_for_timestamp for create table t1 (ts timestamp);
+--enable_warnings
show create table t1;
drop table t1;
diff --git a/mysql-test/suite/sys_vars/inc/sysvar_global_and_session_grant.inc b/mysql-test/suite/sys_vars/inc/sysvar_global_and_session_grant.inc
index 0c6d070583b..09b178deb68 100644
--- a/mysql-test/suite/sys_vars/inc/sysvar_global_and_session_grant.inc
+++ b/mysql-test/suite/sys_vars/inc/sysvar_global_and_session_grant.inc
@@ -3,11 +3,11 @@
--eval SET @global=@@global.$var
---echo # Test that "SET $var" is not allowed without $grant or SUPER
+--echo # Test that "SET $var" is not allowed without $grant
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
---eval REVOKE $grant, SUPER ON *.* FROM user1@localhost
+--eval REVOKE $grant ON *.* FROM user1@localhost
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -33,17 +33,4 @@ CREATE USER user1@localhost;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET $var" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
---eval SET GLOBAL $var=$value
---eval SET $var=$value
---eval SET SESSION $var=$value
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
--eval SET @@global.$var=@global
diff --git a/mysql-test/suite/sys_vars/inc/sysvar_global_grant.inc b/mysql-test/suite/sys_vars/inc/sysvar_global_grant.inc
index f452c1b19d9..bf121d7214f 100644
--- a/mysql-test/suite/sys_vars/inc/sysvar_global_grant.inc
+++ b/mysql-test/suite/sys_vars/inc/sysvar_global_grant.inc
@@ -1,13 +1,12 @@
--source include/not_embedded.inc
-
--eval SET @global=@@global.$var
---echo # Test that "SET $var" is not allowed without $grant or SUPER
+--echo # Test that "SET $var" is not allowed without $grant
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
---eval REVOKE $grant, SUPER ON *.* FROM user1@localhost
+--eval REVOKE $grant ON *.* FROM user1@localhost
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -35,19 +34,4 @@ CREATE USER user1@localhost;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET $var" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
---eval SET GLOBAL $var=$value
---error ER_GLOBAL_VARIABLE
---eval SET $var=$value
---error ER_GLOBAL_VARIABLE
---eval SET SESSION $var=$value
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
--eval SET @@global.$var=@global
diff --git a/mysql-test/suite/sys_vars/inc/sysvar_global_grant_alone.inc b/mysql-test/suite/sys_vars/inc/sysvar_global_grant_alone.inc
index 6a1cf1a74c0..4fb93ce4d24 100644
--- a/mysql-test/suite/sys_vars/inc/sysvar_global_grant_alone.inc
+++ b/mysql-test/suite/sys_vars/inc/sysvar_global_grant_alone.inc
@@ -1,13 +1,12 @@
--source include/not_embedded.inc
-
--eval SET @global=@@global.$var
---echo # Test that "SET GLOBAL $var" is not allowed without $grant or SUPER
+--echo # Test that "SET GLOBAL $var" is not allowed without $grant
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
---eval REVOKE $grant, SUPER ON *.* FROM user1@localhost
+--eval REVOKE $grant ON *.* FROM user1@localhost
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -27,15 +26,4 @@ CREATE USER user1@localhost;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET GLOBAL $var" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
---eval SET GLOBAL $var=$value
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
--eval SET @@global.$var=@global
diff --git a/mysql-test/suite/sys_vars/inc/sysvar_session_grant.inc b/mysql-test/suite/sys_vars/inc/sysvar_session_grant.inc
index 1cdc6e7190a..ce41d5247d0 100644
--- a/mysql-test/suite/sys_vars/inc/sysvar_session_grant.inc
+++ b/mysql-test/suite/sys_vars/inc/sysvar_session_grant.inc
@@ -3,11 +3,11 @@
--eval SET @session=@@session.$var
---echo # Test that "SET $var" is not allowed without $grant or SUPER
+--echo # Test that "SET $var" is not allowed without $grant
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
---eval REVOKE $grant, SUPER ON *.* FROM user1@localhost
+--eval REVOKE $grant ON *.* FROM user1@localhost
--connect(user1,localhost,user1,,)
--connection user1
--error ER_LOCAL_VARIABLE
@@ -34,18 +34,4 @@ CREATE USER user1@localhost;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET $var" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
---error ER_LOCAL_VARIABLE
---eval SET GLOBAL $var=$value
---eval SET $var=$value
---eval SET SESSION $var=$value
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
--eval SET @@session.$var=@session
diff --git a/mysql-test/suite/sys_vars/inc/sysvar_session_grant_alone.inc b/mysql-test/suite/sys_vars/inc/sysvar_session_grant_alone.inc
index af38623a010..b5a484a375c 100644
--- a/mysql-test/suite/sys_vars/inc/sysvar_session_grant_alone.inc
+++ b/mysql-test/suite/sys_vars/inc/sysvar_session_grant_alone.inc
@@ -3,11 +3,11 @@
--eval SET @session=@@session.$var
---echo # Test that "SET $var" is not allowed without $grant or SUPER
+--echo # Test that "SET $var" is not allowed without $grant
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
---eval REVOKE $grant, SUPER ON *.* FROM user1@localhost
+--eval REVOKE $grant ON *.* FROM user1@localhost
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -30,16 +30,4 @@ CREATE USER user1@localhost;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET $var" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
---eval SET $var=$value
---eval SET SESSION $var=$value
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
--eval SET @@session.$var=@session
diff --git a/mysql-test/suite/sys_vars/r/aria_sort_buffer_size_basic,32bit.rdiff b/mysql-test/suite/sys_vars/r/aria_sort_buffer_size_basic,32bit.rdiff
index c30b99f1f95..77bb1f81d40 100644
--- a/mysql-test/suite/sys_vars/r/aria_sort_buffer_size_basic,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/aria_sort_buffer_size_basic,32bit.rdiff
@@ -1,5 +1,5 @@
---- suite/sys_vars/r/aria_sort_buffer_size_basic.result 2021-02-02 02:58:55.686921205 +0200
-+++ suite/sys_vars/r/aria_sort_buffer_size_basic.reject 2021-02-02 11:02:12.361178360 +0200
+--- suite/sys_vars/r/aria_sort_buffer_size_basic.result
++++ suite/sys_vars/r/aria_sort_buffer_size_basic.reject
@@ -44,5 +44,5 @@
set session aria_sort_buffer_size=cast(-1 as unsigned int);
select @@session.aria_sort_buffer_size;
diff --git a/mysql-test/suite/sys_vars/r/binlog_annotate_row_events_grant.result b/mysql-test/suite/sys_vars/r/binlog_annotate_row_events_grant.result
index 1ff25d209bd..69aa36443c6 100644
--- a/mysql-test/suite/sys_vars/r/binlog_annotate_row_events_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_annotate_row_events_grant.result
@@ -2,18 +2,18 @@
# MDEV-21971 Bind BINLOG ADMIN to binlog_annotate_row_events and binlog_row_image global and session variables
#
SET @global=@@global.binlog_annotate_row_events;
-# Test that "SET binlog_annotate_row_events" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_annotate_row_events" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_annotate_row_events=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_annotate_row_events=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET SESSION binlog_annotate_row_events=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -28,15 +28,4 @@ SET SESSION binlog_annotate_row_events=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_annotate_row_events" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_annotate_row_events=1;
-SET binlog_annotate_row_events=1;
-SET SESSION binlog_annotate_row_events=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_annotate_row_events=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_cache_size_grant.result b/mysql-test/suite/sys_vars/r/binlog_cache_size_grant.result
index e6898e58968..49dac9648c3 100644
--- a/mysql-test/suite/sys_vars/r/binlog_cache_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_cache_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.binlog_cache_size;
-# Test that "SET binlog_cache_size" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_cache_size" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_cache_size=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_cache_size=65536;
ERROR HY000: Variable 'binlog_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_cache_size=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'binlog_cache_size' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_cache_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_cache_size=65536;
-SET binlog_cache_size=65536;
-ERROR HY000: Variable 'binlog_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_cache_size=65536;
-ERROR HY000: Variable 'binlog_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_cache_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_commit_wait_count_grant.result b/mysql-test/suite/sys_vars/r/binlog_commit_wait_count_grant.result
index 930772f7499..69d69cf5d5e 100644
--- a/mysql-test/suite/sys_vars/r/binlog_commit_wait_count_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_commit_wait_count_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.binlog_commit_wait_count;
-# Test that "SET binlog_commit_wait_count" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_commit_wait_count" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_commit_wait_count=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_commit_wait_count=65536;
ERROR HY000: Variable 'binlog_commit_wait_count' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_commit_wait_count=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'binlog_commit_wait_count' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_commit_wait_count" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_commit_wait_count=65536;
-SET binlog_commit_wait_count=65536;
-ERROR HY000: Variable 'binlog_commit_wait_count' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_commit_wait_count=65536;
-ERROR HY000: Variable 'binlog_commit_wait_count' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_commit_wait_count=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_commit_wait_usec_grant.result b/mysql-test/suite/sys_vars/r/binlog_commit_wait_usec_grant.result
index cfbb759e959..e5ef4fec042 100644
--- a/mysql-test/suite/sys_vars/r/binlog_commit_wait_usec_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_commit_wait_usec_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.binlog_commit_wait_usec;
-# Test that "SET binlog_commit_wait_usec" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_commit_wait_usec" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_commit_wait_usec=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_commit_wait_usec=65536;
ERROR HY000: Variable 'binlog_commit_wait_usec' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_commit_wait_usec=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'binlog_commit_wait_usec' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_commit_wait_usec" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_commit_wait_usec=65536;
-SET binlog_commit_wait_usec=65536;
-ERROR HY000: Variable 'binlog_commit_wait_usec' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_commit_wait_usec=65536;
-ERROR HY000: Variable 'binlog_commit_wait_usec' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_commit_wait_usec=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_direct_non_transactional_updates_grant.result b/mysql-test/suite/sys_vars/r/binlog_direct_non_transactional_updates_grant.result
index e70dbbf408f..f283d226aae 100644
--- a/mysql-test/suite/sys_vars/r/binlog_direct_non_transactional_updates_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_direct_non_transactional_updates_grant.result
@@ -3,18 +3,18 @@ SET @session= @@global.binlog_direct_non_transactional_updates;
#
#
#
-# Test that "SET binlog_direct_non_transactional_updates" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_direct_non_transactional_updates" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET binlog_direct_non_transactional_updates=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET GLOBAL binlog_direct_non_transactional_updates=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET SESSION binlog_direct_non_transactional_updates=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -29,16 +29,5 @@ SET SESSION binlog_direct_non_transactional_updates=0;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_direct_non_transactional_updates" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET binlog_direct_non_transactional_updates=0;
-SET GLOBAL binlog_direct_non_transactional_updates=0;
-SET SESSION binlog_direct_non_transactional_updates=0;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET GLOBAL binlog_direct_non_transactional_updates=@global;
SET SESSION binlog_direct_non_transactional_updates=@session;
diff --git a/mysql-test/suite/sys_vars/r/binlog_expire_logs_seconds_grant.result b/mysql-test/suite/sys_vars/r/binlog_expire_logs_seconds_grant.result
index 94b57d2e7c3..5e86432272f 100644
--- a/mysql-test/suite/sys_vars/r/binlog_expire_logs_seconds_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_expire_logs_seconds_grant.result
@@ -3,14 +3,14 @@
# Test that "SET binlog_expire_logs_seconds" is not allowed without BINLOG ADMIN or SUPER
#
SET @global=@@global.binlog_expire_logs_seconds;
-# Test that "SET binlog_expire_logs_seconds" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_expire_logs_seconds" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_expire_logs_seconds=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_expire_logs_seconds=10;
ERROR HY000: Variable 'binlog_expire_logs_seconds' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_expire_logs_seconds=10;
@@ -31,17 +31,4 @@ ERROR HY000: Variable 'binlog_expire_logs_seconds' is a GLOBAL variable and shou
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_expire_logs_seconds" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_expire_logs_seconds=10;
-SET binlog_expire_logs_seconds=10;
-ERROR HY000: Variable 'binlog_expire_logs_seconds' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_expire_logs_seconds=10;
-ERROR HY000: Variable 'binlog_expire_logs_seconds' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_expire_logs_seconds=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_file_cache_size_grant.result b/mysql-test/suite/sys_vars/r/binlog_file_cache_size_grant.result
index 3cd5aaf57d4..38ae2098aa9 100644
--- a/mysql-test/suite/sys_vars/r/binlog_file_cache_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_file_cache_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.binlog_file_cache_size;
-# Test that "SET binlog_file_cache_size" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_file_cache_size" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_file_cache_size=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_file_cache_size=65536;
ERROR HY000: Variable 'binlog_file_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_file_cache_size=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'binlog_file_cache_size' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_file_cache_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_file_cache_size=65536;
-SET binlog_file_cache_size=65536;
-ERROR HY000: Variable 'binlog_file_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_file_cache_size=65536;
-ERROR HY000: Variable 'binlog_file_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_file_cache_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_format_grant.result b/mysql-test/suite/sys_vars/r/binlog_format_grant.result
index b3cd77a6bd4..ba9ec79d621 100644
--- a/mysql-test/suite/sys_vars/r/binlog_format_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_format_grant.result
@@ -1,18 +1,18 @@
#
#
#
-# Test that "SET binlog_format" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_format" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET binlog_format=mixed;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET GLOBAL binlog_format=mixed;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET SESSION binlog_format=mixed;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -27,14 +27,3 @@ SET SESSION binlog_format=mixed;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_format" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET binlog_format=mixed;
-SET GLOBAL binlog_format=mixed;
-SET SESSION binlog_format=mixed;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
diff --git a/mysql-test/suite/sys_vars/r/binlog_row_image_grant.result b/mysql-test/suite/sys_vars/r/binlog_row_image_grant.result
index d9cf65d9932..c2194aec404 100644
--- a/mysql-test/suite/sys_vars/r/binlog_row_image_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_row_image_grant.result
@@ -2,18 +2,18 @@
# MDEV-21971 Bind BINLOG ADMIN to binlog_annotate_row_events and binlog_row_image global and session variables
#
SET @global=@@global.binlog_row_image;
-# Test that "SET binlog_row_image" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_row_image" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_row_image=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_row_image=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET SESSION binlog_row_image=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -28,15 +28,4 @@ SET SESSION binlog_row_image=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_row_image" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_row_image=1;
-SET binlog_row_image=1;
-SET SESSION binlog_row_image=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_row_image=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_row_metadata_grant.result b/mysql-test/suite/sys_vars/r/binlog_row_metadata_grant.result
index 43282278aa3..2c47c1481eb 100644
--- a/mysql-test/suite/sys_vars/r/binlog_row_metadata_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_row_metadata_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.binlog_row_metadata;
-# Test that "SET binlog_row_metadata" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_row_metadata" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_row_metadata=NO_LOG;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_row_metadata=NO_LOG;
ERROR HY000: Variable 'binlog_row_metadata' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_row_metadata=NO_LOG;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'binlog_row_metadata' is a GLOBAL variable and should be s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_row_metadata" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_row_metadata=NO_LOG;
-SET binlog_row_metadata=NO_LOG;
-ERROR HY000: Variable 'binlog_row_metadata' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_row_metadata=NO_LOG;
-ERROR HY000: Variable 'binlog_row_metadata' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_row_metadata=@global;
diff --git a/mysql-test/suite/sys_vars/r/binlog_stmt_cache_size_grant.result b/mysql-test/suite/sys_vars/r/binlog_stmt_cache_size_grant.result
index 87070de932c..ca2505e9570 100644
--- a/mysql-test/suite/sys_vars/r/binlog_stmt_cache_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/binlog_stmt_cache_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.binlog_stmt_cache_size;
-# Test that "SET binlog_stmt_cache_size" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET binlog_stmt_cache_size" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL binlog_stmt_cache_size=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET binlog_stmt_cache_size=65536;
ERROR HY000: Variable 'binlog_stmt_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION binlog_stmt_cache_size=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'binlog_stmt_cache_size' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET binlog_stmt_cache_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL binlog_stmt_cache_size=65536;
-SET binlog_stmt_cache_size=65536;
-ERROR HY000: Variable 'binlog_stmt_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION binlog_stmt_cache_size=65536;
-ERROR HY000: Variable 'binlog_stmt_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.binlog_stmt_cache_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/connect_timeout_grant.result b/mysql-test/suite/sys_vars/r/connect_timeout_grant.result
index 96351490f6f..488649a4a3a 100644
--- a/mysql-test/suite/sys_vars/r/connect_timeout_grant.result
+++ b/mysql-test/suite/sys_vars/r/connect_timeout_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.connect_timeout;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET connect_timeout" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET connect_timeout" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL connect_timeout=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET connect_timeout=10;
ERROR HY000: Variable 'connect_timeout' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION connect_timeout=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'connect_timeout' is a GLOBAL variable and should be set w
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET connect_timeout" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL connect_timeout=10;
-SET connect_timeout=10;
-ERROR HY000: Variable 'connect_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION connect_timeout=10;
-ERROR HY000: Variable 'connect_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.connect_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/r/disconnect_on_expired_password_grant.result b/mysql-test/suite/sys_vars/r/disconnect_on_expired_password_grant.result
index b7152351a5d..e3bc2754e85 100644
--- a/mysql-test/suite/sys_vars/r/disconnect_on_expired_password_grant.result
+++ b/mysql-test/suite/sys_vars/r/disconnect_on_expired_password_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.disconnect_on_expired_password;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET disconnect_on_expired_password" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET disconnect_on_expired_password" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL disconnect_on_expired_password=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET disconnect_on_expired_password=1;
ERROR HY000: Variable 'disconnect_on_expired_password' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION disconnect_on_expired_password=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'disconnect_on_expired_password' is a GLOBAL variable and
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET disconnect_on_expired_password" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL disconnect_on_expired_password=1;
-SET disconnect_on_expired_password=1;
-ERROR HY000: Variable 'disconnect_on_expired_password' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION disconnect_on_expired_password=1;
-ERROR HY000: Variable 'disconnect_on_expired_password' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.disconnect_on_expired_password=@global;
diff --git a/mysql-test/suite/sys_vars/r/expire_logs_days_grant.result b/mysql-test/suite/sys_vars/r/expire_logs_days_grant.result
index f7a3ddc76c1..4dcff017357 100644
--- a/mysql-test/suite/sys_vars/r/expire_logs_days_grant.result
+++ b/mysql-test/suite/sys_vars/r/expire_logs_days_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.expire_logs_days;
-# Test that "SET expire_logs_days" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET expire_logs_days" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL expire_logs_days=33;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET expire_logs_days=33;
ERROR HY000: Variable 'expire_logs_days' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION expire_logs_days=33;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'expire_logs_days' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET expire_logs_days" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL expire_logs_days=33;
-SET expire_logs_days=33;
-ERROR HY000: Variable 'expire_logs_days' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION expire_logs_days=33;
-ERROR HY000: Variable 'expire_logs_days' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.expire_logs_days=@global;
diff --git a/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_off.result b/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_off.result
index 1d779352df5..a373e6aefca 100644
--- a/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_off.result
+++ b/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_off.result
@@ -1,4 +1,6 @@
set @@explicit_defaults_for_timestamp=0;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
CREATE TABLE t1 (a TIMESTAMP);
SHOW CREATE TABLE t1;
Table Create Table
@@ -224,6 +226,8 @@ t1 CREATE TABLE `t1` (
`a` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
set explicit_defaults_for_timestamp=1-@@explicit_defaults_for_timestamp;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
execute stmt;
show create table t1;
Table Create Table
diff --git a/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_on.result b/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_on.result
index 85cbfbc2962..37d5a82b021 100644
--- a/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_on.result
+++ b/mysql-test/suite/sys_vars/r/explicit_defaults_for_timestamp_on.result
@@ -233,6 +233,8 @@ t1 CREATE TABLE `t1` (
`a` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
set explicit_defaults_for_timestamp=1-@@explicit_defaults_for_timestamp;
+Warnings:
+Warning 1681 'explicit_defaults_for_timestamp=0' is deprecated and will be removed in a future release
execute stmt;
show create table t1;
Table Create Table
diff --git a/mysql-test/suite/sys_vars/r/extra_max_connections_grant.result b/mysql-test/suite/sys_vars/r/extra_max_connections_grant.result
index 2f211dd5661..2663cfadd95 100644
--- a/mysql-test/suite/sys_vars/r/extra_max_connections_grant.result
+++ b/mysql-test/suite/sys_vars/r/extra_max_connections_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.extra_max_connections;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET extra_max_connections" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET extra_max_connections" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL extra_max_connections=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET extra_max_connections=10;
ERROR HY000: Variable 'extra_max_connections' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION extra_max_connections=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'extra_max_connections' is a GLOBAL variable and should be
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET extra_max_connections" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL extra_max_connections=10;
-SET extra_max_connections=10;
-ERROR HY000: Variable 'extra_max_connections' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION extra_max_connections=10;
-ERROR HY000: Variable 'extra_max_connections' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.extra_max_connections=@global;
diff --git a/mysql-test/suite/sys_vars/r/gtid_binlog_state_grant.result b/mysql-test/suite/sys_vars/r/gtid_binlog_state_grant.result
index 0ccf610cc28..4aa0a800c9b 100644
--- a/mysql-test/suite/sys_vars/r/gtid_binlog_state_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_binlog_state_grant.result
@@ -1,14 +1,14 @@
#
# MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
#
-# Test that "SET gtid_binlog_state" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET gtid_binlog_state" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_binlog_state='0-1-10';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET gtid_binlog_state='0-1-10';
ERROR HY000: Variable 'gtid_binlog_state' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION gtid_binlog_state='0-1-10';
@@ -30,17 +30,3 @@ ERROR HY000: Variable 'gtid_binlog_state' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_binlog_state" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_binlog_state='0-1-10';
-ERROR HY000: Binlog closed, cannot RESET MASTER
-SET gtid_binlog_state='0-1-10';
-ERROR HY000: Variable 'gtid_binlog_state' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION gtid_binlog_state='0-1-10';
-ERROR HY000: Variable 'gtid_binlog_state' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
diff --git a/mysql-test/suite/sys_vars/r/gtid_cleanup_batch_size_grant.result b/mysql-test/suite/sys_vars/r/gtid_cleanup_batch_size_grant.result
index 326fb47d48c..33a39d1ce2d 100644
--- a/mysql-test/suite/sys_vars/r/gtid_cleanup_batch_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_cleanup_batch_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
#
SET @global=@@global.gtid_cleanup_batch_size;
-# Test that "SET gtid_cleanup_batch_size" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET gtid_cleanup_batch_size" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_cleanup_batch_size=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET gtid_cleanup_batch_size=1;
ERROR HY000: Variable 'gtid_cleanup_batch_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION gtid_cleanup_batch_size=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'gtid_cleanup_batch_size' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_cleanup_batch_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_cleanup_batch_size=1;
-SET gtid_cleanup_batch_size=1;
-ERROR HY000: Variable 'gtid_cleanup_batch_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION gtid_cleanup_batch_size=1;
-ERROR HY000: Variable 'gtid_cleanup_batch_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.gtid_cleanup_batch_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/gtid_domain_id_grant.result b/mysql-test/suite/sys_vars/r/gtid_domain_id_grant.result
index 096f5136ab0..9d1ac3de3ea 100644
--- a/mysql-test/suite/sys_vars/r/gtid_domain_id_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_domain_id_grant.result
@@ -2,14 +2,14 @@
# MDEV-21975 Add BINLOG REPLAY privilege and bind new privileges to gtid_seq_no, pseudo_thread_id, server_id, gtid_domain_id
#
SET @global=@@global.gtid_domain_id;
-# Test that "SET GLOBAL gtid_domain_id" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET GLOBAL gtid_domain_id" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_domain_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -22,27 +22,18 @@ SET GLOBAL gtid_domain_id=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET GLOBAL gtid_domain_id" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_domain_id=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.gtid_domain_id=@global;
SET @session=@@session.gtid_domain_id;
-# Test that "SET gtid_domain_id" is not allowed without BINLOG REPLAY or SUPER
+# Test that "SET gtid_domain_id" is not allowed without BINLOG REPLAY
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG REPLAY, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET gtid_domain_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
SET SESSION gtid_domain_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -56,14 +47,4 @@ SET SESSION gtid_domain_id=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_domain_id" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET gtid_domain_id=1;
-SET SESSION gtid_domain_id=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@session.gtid_domain_id=@session;
diff --git a/mysql-test/suite/sys_vars/r/gtid_ignore_duplicates_grant.result b/mysql-test/suite/sys_vars/r/gtid_ignore_duplicates_grant.result
index f4c95fb6192..ed6faac5c84 100644
--- a/mysql-test/suite/sys_vars/r/gtid_ignore_duplicates_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_ignore_duplicates_grant.result
@@ -2,14 +2,14 @@
# MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
#
SET @global=@@global.gtid_ignore_duplicates;
-# Test that "SET gtid_ignore_duplicates" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET gtid_ignore_duplicates" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_ignore_duplicates=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET gtid_ignore_duplicates=1;
ERROR HY000: Variable 'gtid_ignore_duplicates' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION gtid_ignore_duplicates=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'gtid_ignore_duplicates' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_ignore_duplicates" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_ignore_duplicates=1;
-SET gtid_ignore_duplicates=1;
-ERROR HY000: Variable 'gtid_ignore_duplicates' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION gtid_ignore_duplicates=1;
-ERROR HY000: Variable 'gtid_ignore_duplicates' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.gtid_ignore_duplicates=@global;
diff --git a/mysql-test/suite/sys_vars/r/gtid_pos_auto_engines_grant.result b/mysql-test/suite/sys_vars/r/gtid_pos_auto_engines_grant.result
index 60fae07cf7c..f3ee01798e6 100644
--- a/mysql-test/suite/sys_vars/r/gtid_pos_auto_engines_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_pos_auto_engines_grant.result
@@ -2,14 +2,14 @@
# MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
#
SET @global=@@global.gtid_pos_auto_engines;
-# Test that "SET gtid_pos_auto_engines" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET gtid_pos_auto_engines" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_pos_auto_engines='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET gtid_pos_auto_engines='';
ERROR HY000: Variable 'gtid_pos_auto_engines' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION gtid_pos_auto_engines='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'gtid_pos_auto_engines' is a GLOBAL variable and should be
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_pos_auto_engines" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_pos_auto_engines='';
-SET gtid_pos_auto_engines='';
-ERROR HY000: Variable 'gtid_pos_auto_engines' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION gtid_pos_auto_engines='';
-ERROR HY000: Variable 'gtid_pos_auto_engines' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.gtid_pos_auto_engines=@global;
diff --git a/mysql-test/suite/sys_vars/r/gtid_seq_no_grant.result b/mysql-test/suite/sys_vars/r/gtid_seq_no_grant.result
index 0d08cde546c..d8a5d976e8a 100644
--- a/mysql-test/suite/sys_vars/r/gtid_seq_no_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_seq_no_grant.result
@@ -2,18 +2,18 @@
# MDEV-21975 Add BINLOG REPLAY privilege and bind new privileges to gtid_seq_no, pseudo_thread_id, server_id, gtid_domain_id
#
SET @session=@@session.gtid_seq_no;
-# Test that "SET gtid_seq_no" is not allowed without BINLOG REPLAY or SUPER
+# Test that "SET gtid_seq_no" is not allowed without BINLOG REPLAY
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG REPLAY, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_seq_no=1;
ERROR HY000: Variable 'gtid_seq_no' is a SESSION variable and can't be used with SET GLOBAL
SET gtid_seq_no=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
SET SESSION gtid_seq_no=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -29,16 +29,4 @@ SET SESSION gtid_seq_no=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_seq_no" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_seq_no=1;
-ERROR HY000: Variable 'gtid_seq_no' is a SESSION variable and can't be used with SET GLOBAL
-SET gtid_seq_no=1;
-SET SESSION gtid_seq_no=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@session.gtid_seq_no=@session;
diff --git a/mysql-test/suite/sys_vars/r/gtid_slave_pos_grant.result b/mysql-test/suite/sys_vars/r/gtid_slave_pos_grant.result
index 52918706e3f..dfad91fbf6b 100644
--- a/mysql-test/suite/sys_vars/r/gtid_slave_pos_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_slave_pos_grant.result
@@ -2,14 +2,14 @@
# MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
#
SET @global=@@global.gtid_slave_pos;
-# Test that "SET gtid_slave_pos" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET gtid_slave_pos" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_slave_pos='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET gtid_slave_pos='';
ERROR HY000: Variable 'gtid_slave_pos' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION gtid_slave_pos='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'gtid_slave_pos' is a GLOBAL variable and should be set wi
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_slave_pos" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_slave_pos='';
-SET gtid_slave_pos='';
-ERROR HY000: Variable 'gtid_slave_pos' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION gtid_slave_pos='';
-ERROR HY000: Variable 'gtid_slave_pos' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.gtid_slave_pos=@global;
diff --git a/mysql-test/suite/sys_vars/r/gtid_strict_mode_grant.result b/mysql-test/suite/sys_vars/r/gtid_strict_mode_grant.result
index 0ea4d0ab0b3..8744d7433cb 100644
--- a/mysql-test/suite/sys_vars/r/gtid_strict_mode_grant.result
+++ b/mysql-test/suite/sys_vars/r/gtid_strict_mode_grant.result
@@ -2,14 +2,14 @@
# MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
#
SET @global=@@global.gtid_strict_mode;
-# Test that "SET gtid_strict_mode" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET gtid_strict_mode" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL gtid_strict_mode=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET gtid_strict_mode=1;
ERROR HY000: Variable 'gtid_strict_mode' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION gtid_strict_mode=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'gtid_strict_mode' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET gtid_strict_mode" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL gtid_strict_mode=1;
-SET gtid_strict_mode=1;
-ERROR HY000: Variable 'gtid_strict_mode' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION gtid_strict_mode=1;
-ERROR HY000: Variable 'gtid_strict_mode' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.gtid_strict_mode=@global;
diff --git a/mysql-test/suite/sys_vars/r/histogram_type_basic.result b/mysql-test/suite/sys_vars/r/histogram_type_basic.result
index c24192002aa..8dbd32512f8 100644
--- a/mysql-test/suite/sys_vars/r/histogram_type_basic.result
+++ b/mysql-test/suite/sys_vars/r/histogram_type_basic.result
@@ -10,7 +10,7 @@ SET @@global.histogram_type = 1;
SET @@global.histogram_type = DEFAULT;
SELECT @@global.histogram_type;
@@global.histogram_type
-DOUBLE_PREC_HB
+JSON_HB
SET @@global.histogram_type = 0;
SELECT @@global.histogram_type;
@@global.histogram_type
diff --git a/mysql-test/suite/sys_vars/r/init_connect_grant.result b/mysql-test/suite/sys_vars/r/init_connect_grant.result
index 6c3726708f4..2575052c629 100644
--- a/mysql-test/suite/sys_vars/r/init_connect_grant.result
+++ b/mysql-test/suite/sys_vars/r/init_connect_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.init_connect;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET init_connect" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET init_connect" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL init_connect="SET @xxx=1";
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET init_connect="SET @xxx=1";
ERROR HY000: Variable 'init_connect' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION init_connect="SET @xxx=1";
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'init_connect' is a GLOBAL variable and should be set with
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET init_connect" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL init_connect="SET @xxx=1";
-SET init_connect="SET @xxx=1";
-ERROR HY000: Variable 'init_connect' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION init_connect="SET @xxx=1";
-ERROR HY000: Variable 'init_connect' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.init_connect=@global;
diff --git a/mysql-test/suite/sys_vars/r/init_slave_grant.result b/mysql-test/suite/sys_vars/r/init_slave_grant.result
index 59639b92482..9655498de36 100644
--- a/mysql-test/suite/sys_vars/r/init_slave_grant.result
+++ b/mysql-test/suite/sys_vars/r/init_slave_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.init_slave;
-# Test that "SET init_slave" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET init_slave" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL init_slave='SET @x=1';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET init_slave='SET @x=1';
ERROR HY000: Variable 'init_slave' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION init_slave='SET @x=1';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'init_slave' is a GLOBAL variable and should be set with S
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET init_slave" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL init_slave='SET @x=1';
-SET init_slave='SET @x=1';
-ERROR HY000: Variable 'init_slave' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION init_slave='SET @x=1';
-ERROR HY000: Variable 'init_slave' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.init_slave=@global;
diff --git a/mysql-test/suite/sys_vars/r/innodb_change_buffer_max_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_change_buffer_max_size_basic.result
deleted file mode 100644
index 03f11ece358..00000000000
--- a/mysql-test/suite/sys_vars/r/innodb_change_buffer_max_size_basic.result
+++ /dev/null
@@ -1,77 +0,0 @@
-SET @start_global_value = @@global.innodb_change_buffer_max_size;
-SELECT @start_global_value;
-@start_global_value
-25
-Valid values are between 0 and 50
-select @@global.innodb_change_buffer_max_size between 0 and 50;
-@@global.innodb_change_buffer_max_size between 0 and 50
-1
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-25
-select @@session.innodb_change_buffer_max_size;
-ERROR HY000: Variable 'innodb_change_buffer_max_size' is a GLOBAL variable
-show global variables like 'innodb_change_buffer_max_size';
-Variable_name Value
-innodb_change_buffer_max_size 25
-show session variables like 'innodb_change_buffer_max_size';
-Variable_name Value
-innodb_change_buffer_max_size 25
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFER_MAX_SIZE 25
-select * from information_schema.session_variables where variable_name='innodb_change_buffer_max_size';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFER_MAX_SIZE 25
-set global innodb_change_buffer_max_size=10;
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-10
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFER_MAX_SIZE 10
-select * from information_schema.session_variables where variable_name='innodb_change_buffer_max_size';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFER_MAX_SIZE 10
-set session innodb_change_buffer_max_size=1;
-ERROR HY000: Variable 'innodb_change_buffer_max_size' is a GLOBAL variable and should be set with SET GLOBAL
-set global innodb_change_buffer_max_size=1.1;
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffer_max_size'
-set global innodb_change_buffer_max_size=1e1;
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffer_max_size'
-set global innodb_change_buffer_max_size="foo";
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffer_max_size'
-set global innodb_change_buffer_max_size=-7;
-Warnings:
-Warning 1292 Truncated incorrect innodb_change_buffer_max_size value: '-7'
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-0
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFER_MAX_SIZE 0
-set global innodb_change_buffer_max_size=56;
-Warnings:
-Warning 1292 Truncated incorrect innodb_change_buffer_max_size value: '56'
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-50
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFER_MAX_SIZE 50
-set global innodb_change_buffer_max_size=0;
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-0
-set global innodb_change_buffer_max_size=50;
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-50
-set global innodb_change_buffer_max_size=DEFAULT;
-select @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-25
-SET @@global.innodb_change_buffer_max_size = @start_global_value;
-SELECT @@global.innodb_change_buffer_max_size;
-@@global.innodb_change_buffer_max_size
-25
diff --git a/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result b/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result
deleted file mode 100644
index f3b7ac80523..00000000000
--- a/mysql-test/suite/sys_vars/r/innodb_change_buffering_basic.result
+++ /dev/null
@@ -1,73 +0,0 @@
-SET @start_global_value = @@global.innodb_change_buffering;
-SELECT @start_global_value;
-@start_global_value
-none
-Valid values are 'all', 'deletes', 'changes', 'inserts', 'none', 'purges'
-select @@global.innodb_change_buffering in ('all', 'deletes', 'changes', 'inserts', 'none', 'purges');
-@@global.innodb_change_buffering in ('all', 'deletes', 'changes', 'inserts', 'none', 'purges')
-1
-select @@global.innodb_change_buffering;
-@@global.innodb_change_buffering
-none
-select @@session.innodb_change_buffering;
-ERROR HY000: Variable 'innodb_change_buffering' is a GLOBAL variable
-show global variables like 'innodb_change_buffering';
-Variable_name Value
-innodb_change_buffering none
-show session variables like 'innodb_change_buffering';
-Variable_name Value
-innodb_change_buffering none
-select * from information_schema.global_variables where variable_name='innodb_change_buffering';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING none
-select * from information_schema.session_variables where variable_name='innodb_change_buffering';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING none
-set global innodb_change_buffering='none';
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-select @@global.innodb_change_buffering;
-@@global.innodb_change_buffering
-none
-select * from information_schema.global_variables where variable_name='innodb_change_buffering';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING none
-select * from information_schema.session_variables where variable_name='innodb_change_buffering';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING none
-set @@global.innodb_change_buffering='inserts';
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-select @@global.innodb_change_buffering;
-@@global.innodb_change_buffering
-inserts
-select * from information_schema.global_variables where variable_name='innodb_change_buffering';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING inserts
-select * from information_schema.session_variables where variable_name='innodb_change_buffering';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING inserts
-set session innodb_change_buffering='some';
-ERROR HY000: Variable 'innodb_change_buffering' is a GLOBAL variable and should be set with SET GLOBAL
-set @@session.innodb_change_buffering='some';
-ERROR HY000: Variable 'innodb_change_buffering' is a GLOBAL variable and should be set with SET GLOBAL
-set global innodb_change_buffering=1.1;
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering'
-set global innodb_change_buffering=1;
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-SELECT @@global.innodb_change_buffering;
-@@global.innodb_change_buffering
-inserts
-set global innodb_change_buffering=-2;
-ERROR 42000: Variable 'innodb_change_buffering' can't be set to the value of '-2'
-set global innodb_change_buffering=1e1;
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering'
-set global innodb_change_buffering='some';
-ERROR 42000: Variable 'innodb_change_buffering' can't be set to the value of 'some'
-SET @@global.innodb_change_buffering = @start_global_value;
-Warnings:
-Warning 1287 '@@innodb_change_buffering' is deprecated and will be removed in a future release
-SELECT @@global.innodb_change_buffering;
-@@global.innodb_change_buffering
-none
diff --git a/mysql-test/suite/sys_vars/r/innodb_change_buffering_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_change_buffering_debug_basic.result
deleted file mode 100644
index 2cb3fc76e30..00000000000
--- a/mysql-test/suite/sys_vars/r/innodb_change_buffering_debug_basic.result
+++ /dev/null
@@ -1,67 +0,0 @@
-SET @start_global_value = @@global.innodb_change_buffering_debug;
-SELECT @start_global_value;
-@start_global_value
-0
-select @@global.innodb_change_buffering_debug in (0, 1);
-@@global.innodb_change_buffering_debug in (0, 1)
-1
-select @@global.innodb_change_buffering_debug;
-@@global.innodb_change_buffering_debug
-0
-select @@session.innodb_change_buffering_debug;
-ERROR HY000: Variable 'innodb_change_buffering_debug' is a GLOBAL variable
-show global variables like 'innodb_change_buffering_debug';
-Variable_name Value
-innodb_change_buffering_debug 0
-show session variables like 'innodb_change_buffering_debug';
-Variable_name Value
-innodb_change_buffering_debug 0
-select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING_DEBUG 0
-select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING_DEBUG 0
-set global innodb_change_buffering_debug=1;
-select @@global.innodb_change_buffering_debug;
-@@global.innodb_change_buffering_debug
-1
-select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING_DEBUG 1
-select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING_DEBUG 1
-set @@global.innodb_change_buffering_debug=0;
-select @@global.innodb_change_buffering_debug;
-@@global.innodb_change_buffering_debug
-0
-select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING_DEBUG 0
-select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_CHANGE_BUFFERING_DEBUG 0
-set session innodb_change_buffering_debug='some';
-ERROR HY000: Variable 'innodb_change_buffering_debug' is a GLOBAL variable and should be set with SET GLOBAL
-set @@session.innodb_change_buffering_debug='some';
-ERROR HY000: Variable 'innodb_change_buffering_debug' is a GLOBAL variable and should be set with SET GLOBAL
-set global innodb_change_buffering_debug=1.1;
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering_debug'
-set global innodb_change_buffering_debug='foo';
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering_debug'
-set global innodb_change_buffering_debug=-2;
-Warnings:
-Warning 1292 Truncated incorrect innodb_change_buffering_debug value: '-2'
-set global innodb_change_buffering_debug=1e1;
-ERROR 42000: Incorrect argument type to variable 'innodb_change_buffering_debug'
-set global innodb_change_buffering_debug=2;
-Warnings:
-Warning 1292 Truncated incorrect innodb_change_buffering_debug value: '2'
-select @@global.innodb_change_buffering_debug;
-@@global.innodb_change_buffering_debug
-1
-SET @@global.innodb_change_buffering_debug = @start_global_value;
-SELECT @@global.innodb_change_buffering_debug;
-@@global.innodb_change_buffering_debug
-0
diff --git a/mysql-test/suite/sys_vars/r/innodb_defragment_basic.result b/mysql-test/suite/sys_vars/r/innodb_defragment_basic.result
index 916bb5ca1a9..d9226fee99f 100644
--- a/mysql-test/suite/sys_vars/r/innodb_defragment_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_defragment_basic.result
@@ -3,10 +3,14 @@ SELECT @orig;
@orig
0
SET GLOBAL innodb_defragment = OFF;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment;
@@global.innodb_defragment
0
SET GLOBAL innodb_defragment = ON;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment;
@@global.innodb_defragment
1
@@ -16,3 +20,5 @@ SELECT @@global.innodb_defragment;
@@global.innodb_defragment
1
SET GLOBAL innodb_defragment = @orig;
+Warnings:
+Warning 1287 '@@innodb_defragment' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_basic.result b/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_basic.result
index 93a5af727c3..04db2580f15 100644
--- a/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_basic.result
@@ -6,25 +6,33 @@ SELECT COUNT(@@global.innodb_defragment_fill_factor);
COUNT(@@global.innodb_defragment_fill_factor)
1
SET @@global.innodb_defragment_fill_factor = 0.77777777777777;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
0.777778
SET @@global.innodb_defragment_fill_factor = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
1.000000
SET @@global.innodb_defragment_fill_factor = 0.7;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
0.700000
SET @@global.innodb_defragment_fill_factor = -1;
Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor value: '-1'
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
0.700000
SET @@global.innodb_defragment_fill_factor = 2;
Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor value: '2'
SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
@@ -35,3 +43,5 @@ SELECT @@global.innodb_defragment_fill_factor;
@@global.innodb_defragment_fill_factor
1.000000
SET @@global.innodb_defragment_fill_factor = @start_innodb_defragment_fill_factor;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_n_recs_basic.result b/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_n_recs_basic.result
index a8ca081d0f0..12bec5204f6 100644
--- a/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_n_recs_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_defragment_fill_factor_n_recs_basic.result
@@ -6,25 +6,33 @@ SELECT COUNT(@@global.innodb_defragment_fill_factor_n_recs);
COUNT(@@global.innodb_defragment_fill_factor_n_recs)
1
SET @@global.innodb_defragment_fill_factor_n_recs = 50;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
50
SET @@global.innodb_defragment_fill_factor_n_recs = 100;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
100
SET @@global.innodb_defragment_fill_factor_n_recs = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
1
SET @@global.innodb_defragment_fill_factor_n_recs = -1;
Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor... value: '-1'
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
1
SET @@global.innodb_defragment_fill_factor_n_recs = 10000;
Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_fill_factor... value: '10000'
SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
@@ -40,3 +48,5 @@ SELECT @@global.innodb_defragment_fill_factor_n_recs;
@@global.innodb_defragment_fill_factor_n_recs
100
SET @@global.innodb_defragment_fill_factor_n_recs = @start_innodb_defragment_fill_factor_n_recs;
+Warnings:
+Warning 1287 '@@innodb_defragment_fill_factor_n_recs' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/sys_vars/r/innodb_defragment_frequency_basic.result b/mysql-test/suite/sys_vars/r/innodb_defragment_frequency_basic.result
index d4314d6506e..b9f76d60dac 100644
--- a/mysql-test/suite/sys_vars/r/innodb_defragment_frequency_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_defragment_frequency_basic.result
@@ -6,25 +6,33 @@ SELECT COUNT(@@global.innodb_defragment_frequency);
COUNT(@@global.innodb_defragment_frequency)
1
SET @@global.innodb_defragment_frequency = 200;
+Warnings:
+Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
200
SET @@global.innodb_defragment_frequency = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1
SET @@global.innodb_defragment_frequency = 1000;
+Warnings:
+Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1000
SET @@global.innodb_defragment_frequency = -1;
Warnings:
+Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_frequency value: '-1'
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1
SET @@global.innodb_defragment_frequency = 10000;
Warnings:
+Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_frequency value: '10000'
SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
@@ -40,3 +48,5 @@ SELECT @@global.innodb_defragment_frequency;
@@global.innodb_defragment_frequency
1000
SET @@global.innodb_defragment_frequency = @start_innodb_defragment_frequency;
+Warnings:
+Warning 1287 '@@innodb_defragment_frequency' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/sys_vars/r/innodb_defragment_n_pages_basic.result b/mysql-test/suite/sys_vars/r/innodb_defragment_n_pages_basic.result
index 99b68b39ec4..823ddad7d59 100644
--- a/mysql-test/suite/sys_vars/r/innodb_defragment_n_pages_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_defragment_n_pages_basic.result
@@ -7,22 +7,30 @@ COUNT(@@global.innodb_defragment_n_pages)
1
SET @@global.innodb_defragment_n_pages = 1;
Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_n_pages value: '1'
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
2
SET @@global.innodb_defragment_n_pages = 2;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
2
SET @@global.innodb_defragment_n_pages = 32;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
32
SET @@global.innodb_defragment_n_pages = 64;
Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_n_pages value: '64'
SELECT @@global.innodb_defragment_n_pages;
@@global.innodb_defragment_n_pages
32
SET @@global.innodb_defragment_n_pages = @start_innodb_defragment_n_pages;
+Warnings:
+Warning 1287 '@@innodb_defragment_n_pages' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/sys_vars/r/innodb_defragment_stats_accuracy_basic.result b/mysql-test/suite/sys_vars/r/innodb_defragment_stats_accuracy_basic.result
index 025dacdb1ec..7a2abfa1350 100644
--- a/mysql-test/suite/sys_vars/r/innodb_defragment_stats_accuracy_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_defragment_stats_accuracy_basic.result
@@ -6,21 +6,27 @@ SELECT COUNT(@@global.innodb_defragment_stats_accuracy);
COUNT(@@global.innodb_defragment_stats_accuracy)
1
SET @@global.innodb_defragment_stats_accuracy = 1;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
1
SET @@global.innodb_defragment_stats_accuracy = 1000;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
1000
SET @@global.innodb_defragment_stats_accuracy = -1;
Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_stats_accuracy value: '-1'
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
0
SET @@global.innodb_defragment_stats_accuracy = 1000000000000;
Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
Warning 1292 Truncated incorrect innodb_defragment_stats_accuracy value: '1000000000000'
SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
@@ -31,3 +37,5 @@ SELECT @@global.innodb_defragment_stats_accuracy;
@@global.innodb_defragment_stats_accuracy
4294967295
SET @@global.innodb_defragment_stats_accuracy = @start_innodb_defragment_stats_accuracy;
+Warnings:
+Warning 1287 '@@innodb_defragment_stats_accuracy' is deprecated and will be removed in a future release
diff --git a/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result
index 477eb7fcb61..abf2cdaf1c4 100644
--- a/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result
@@ -17,8 +17,6 @@ ERROR HY000: Variable 'innodb_fil_make_page_dirty_debug' is a GLOBAL variable an
create table t1 (f1 int primary key) engine = innodb;
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = @space_id;
drop table t1;
diff --git a/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result b/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result
index 9780357e69f..9d7555335a7 100644
--- a/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result
@@ -9,18 +9,26 @@ COUNT(@@GLOBAL.innodb_file_per_table)
1 Expected
'#---------------------BS_STVARS_028_02----------------------#'
SET @@global.innodb_file_per_table = 0;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SELECT @@global.innodb_file_per_table;
@@global.innodb_file_per_table
0
SET @@global.innodb_file_per_table ='On' ;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SELECT @@global.innodb_file_per_table;
@@global.innodb_file_per_table
1
SET @@global.innodb_file_per_table ='Off' ;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SELECT @@global.innodb_file_per_table;
@@global.innodb_file_per_table
0
SET @@global.innodb_file_per_table = 1;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SELECT @@global.innodb_file_per_table;
@@global.innodb_file_per_table
1
@@ -64,6 +72,8 @@ COUNT(@@GLOBAL.innodb_file_per_table)
SELECT innodb_file_per_table = @@SESSION.innodb_file_per_table;
ERROR 42S22: Unknown column 'innodb_file_per_table' in 'field list'
SET @@global.innodb_file_per_table = @start_global_value;
+Warnings:
+Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release
SELECT @@global.innodb_file_per_table;
@@global.innodb_file_per_table
1
diff --git a/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit,32bit.rdiff b/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit,32bit.rdiff
index cd9a004a686..79e15420af3 100644
--- a/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit,32bit.rdiff
@@ -1,5 +1,5 @@
---- mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit.result 2019-05-07 15:09:57.220599318 +0530
-+++ mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit.reject 2019-05-07 15:10:20.012718538 +0530
+--- mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit.result
++++ mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit.reject
@@ -1,5 +1,7 @@
set global innodb_ft_result_cache_limit=5000000000;
+Warnings:
diff --git a/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result
index b306749dcb0..20e2b78e640 100644
--- a/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result
@@ -17,8 +17,6 @@ ERROR HY000: Variable 'innodb_saved_page_number_debug' is a GLOBAL variable and
create table t1 (f1 int primary key) engine = innodb;
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = @space_id;
drop table t1;
diff --git a/mysql-test/suite/sys_vars/r/log_bin_compress_grant.result b/mysql-test/suite/sys_vars/r/log_bin_compress_grant.result
index f75f22a75f8..1f0220a8093 100644
--- a/mysql-test/suite/sys_vars/r/log_bin_compress_grant.result
+++ b/mysql-test/suite/sys_vars/r/log_bin_compress_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.log_bin_compress;
-# Test that "SET log_bin_compress" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET log_bin_compress" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL log_bin_compress=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET log_bin_compress=1;
ERROR HY000: Variable 'log_bin_compress' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION log_bin_compress=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'log_bin_compress' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET log_bin_compress" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL log_bin_compress=1;
-SET log_bin_compress=1;
-ERROR HY000: Variable 'log_bin_compress' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION log_bin_compress=1;
-ERROR HY000: Variable 'log_bin_compress' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.log_bin_compress=@global;
diff --git a/mysql-test/suite/sys_vars/r/log_bin_compress_min_len_grant.result b/mysql-test/suite/sys_vars/r/log_bin_compress_min_len_grant.result
index b1ccafb1dd2..2f4590b4b00 100644
--- a/mysql-test/suite/sys_vars/r/log_bin_compress_min_len_grant.result
+++ b/mysql-test/suite/sys_vars/r/log_bin_compress_min_len_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.log_bin_compress_min_len;
-# Test that "SET log_bin_compress_min_len" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET log_bin_compress_min_len" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL log_bin_compress_min_len=512;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET log_bin_compress_min_len=512;
ERROR HY000: Variable 'log_bin_compress_min_len' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION log_bin_compress_min_len=512;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'log_bin_compress_min_len' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET log_bin_compress_min_len" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL log_bin_compress_min_len=512;
-SET log_bin_compress_min_len=512;
-ERROR HY000: Variable 'log_bin_compress_min_len' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION log_bin_compress_min_len=512;
-ERROR HY000: Variable 'log_bin_compress_min_len' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.log_bin_compress_min_len=@global;
diff --git a/mysql-test/suite/sys_vars/r/log_bin_trust_function_creators_grant.result b/mysql-test/suite/sys_vars/r/log_bin_trust_function_creators_grant.result
index ef9af94d8f6..5d3948bfa44 100644
--- a/mysql-test/suite/sys_vars/r/log_bin_trust_function_creators_grant.result
+++ b/mysql-test/suite/sys_vars/r/log_bin_trust_function_creators_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.log_bin_trust_function_creators;
-# Test that "SET log_bin_trust_function_creators" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET log_bin_trust_function_creators" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL log_bin_trust_function_creators=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET log_bin_trust_function_creators=1;
ERROR HY000: Variable 'log_bin_trust_function_creators' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION log_bin_trust_function_creators=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'log_bin_trust_function_creators' is a GLOBAL variable and
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET log_bin_trust_function_creators" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL log_bin_trust_function_creators=1;
-SET log_bin_trust_function_creators=1;
-ERROR HY000: Variable 'log_bin_trust_function_creators' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION log_bin_trust_function_creators=1;
-ERROR HY000: Variable 'log_bin_trust_function_creators' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.log_bin_trust_function_creators=@global;
diff --git a/mysql-test/suite/sys_vars/r/log_slow_admin_statements_func.result b/mysql-test/suite/sys_vars/r/log_slow_admin_statements_func.result
index 95916d8ce15..41a15e43356 100644
--- a/mysql-test/suite/sys_vars/r/log_slow_admin_statements_func.result
+++ b/mysql-test/suite/sys_vars/r/log_slow_admin_statements_func.result
@@ -11,6 +11,8 @@ SET GLOBAL log_output = 'file,table';
SET GLOBAL slow_query_log = on;
SET SESSION long_query_time = 0;
SET SESSION log_slow_admin_statements = on;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
ALTER TABLE log_slow_admin_statements ADD COLUMN k INT DEFAULT 17;
CREATE PROCEDURE add_rows()
BEGIN
@@ -42,22 +44,32 @@ SET @@global.log_output= @old_log_output;
SET @@global.slow_query_log= @old_slow_query_log;
SET @@session.long_query_time= @old_long_query_time;
SET @@global.log_slow_admin_statements= @old_log_slow_admin_statements;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
DROP PROCEDURE add_rows;
TRUNCATE TABLE mysql.slow_log;
SET @save_log_slow_disabled_statements= @@global.log_slow_disabled_statements;
SET @@SESSION.log_slow_admin_statements= TRUE;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
select @@SESSION.log_slow_disabled_statements;
@@SESSION.log_slow_disabled_statements
sp
SET @@SESSION.log_slow_admin_statements= FALSE;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
select @@SESSION.log_slow_disabled_statements;
@@SESSION.log_slow_disabled_statements
admin,sp
SET @@GLOBAL.log_slow_admin_statements= TRUE;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
select @@GLOBAL.log_slow_disabled_statements;
@@GLOBAL.log_slow_disabled_statements
sp
SET @@GLOBAL.log_slow_admin_statements= FALSE;
+Warnings:
+Warning 1287 '@@log_slow_admin_statements' is deprecated and will be removed in a future release. Please use '@@log_slow_filter' instead
select @@GLOBAL.log_slow_disabled_statements;
@@GLOBAL.log_slow_disabled_statements
admin,sp
diff --git a/mysql-test/suite/sys_vars/r/master_verify_checksum_grant.result b/mysql-test/suite/sys_vars/r/master_verify_checksum_grant.result
index 4791e4cd7e0..8d773f35855 100644
--- a/mysql-test/suite/sys_vars/r/master_verify_checksum_grant.result
+++ b/mysql-test/suite/sys_vars/r/master_verify_checksum_grant.result
@@ -2,14 +2,14 @@
# MDEV-21972 Bind REPLICATION MASTER ADMIN to master_verify_checksum
#
SET @global=@@global.master_verify_checksum;
-# Test that "SET master_verify_checksum" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET master_verify_checksum" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL master_verify_checksum=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET master_verify_checksum=1;
ERROR HY000: Variable 'master_verify_checksum' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION master_verify_checksum=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'master_verify_checksum' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET master_verify_checksum" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL master_verify_checksum=1;
-SET master_verify_checksum=1;
-ERROR HY000: Variable 'master_verify_checksum' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION master_verify_checksum=1;
-ERROR HY000: Variable 'master_verify_checksum' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.master_verify_checksum=@global;
diff --git a/mysql-test/suite/sys_vars/r/max_binlog_cache_size_grant.result b/mysql-test/suite/sys_vars/r/max_binlog_cache_size_grant.result
index 350194c46cc..f1108553b4e 100644
--- a/mysql-test/suite/sys_vars/r/max_binlog_cache_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/max_binlog_cache_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.max_binlog_cache_size;
-# Test that "SET max_binlog_cache_size" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET max_binlog_cache_size" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL max_binlog_cache_size=4096;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET max_binlog_cache_size=4096;
ERROR HY000: Variable 'max_binlog_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION max_binlog_cache_size=4096;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'max_binlog_cache_size' is a GLOBAL variable and should be
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET max_binlog_cache_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL max_binlog_cache_size=4096;
-SET max_binlog_cache_size=4096;
-ERROR HY000: Variable 'max_binlog_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION max_binlog_cache_size=4096;
-ERROR HY000: Variable 'max_binlog_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.max_binlog_cache_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/max_binlog_size_grant.result b/mysql-test/suite/sys_vars/r/max_binlog_size_grant.result
index 34e1fde76fe..6937e82d3d0 100644
--- a/mysql-test/suite/sys_vars/r/max_binlog_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/max_binlog_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.max_binlog_size;
-# Test that "SET max_binlog_size" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET max_binlog_size" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL max_binlog_size=4096;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET max_binlog_size=4096;
ERROR HY000: Variable 'max_binlog_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION max_binlog_size=4096;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'max_binlog_size' is a GLOBAL variable and should be set w
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET max_binlog_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL max_binlog_size=4096;
-SET max_binlog_size=4096;
-ERROR HY000: Variable 'max_binlog_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION max_binlog_size=4096;
-ERROR HY000: Variable 'max_binlog_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.max_binlog_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/max_binlog_stmt_cache_size_grant.result b/mysql-test/suite/sys_vars/r/max_binlog_stmt_cache_size_grant.result
index 2ddd164f7c1..c6a01661080 100644
--- a/mysql-test/suite/sys_vars/r/max_binlog_stmt_cache_size_grant.result
+++ b/mysql-test/suite/sys_vars/r/max_binlog_stmt_cache_size_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.max_binlog_stmt_cache_size;
-# Test that "SET max_binlog_stmt_cache_size" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET max_binlog_stmt_cache_size" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL max_binlog_stmt_cache_size=4096;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET max_binlog_stmt_cache_size=4096;
ERROR HY000: Variable 'max_binlog_stmt_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION max_binlog_stmt_cache_size=4096;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'max_binlog_stmt_cache_size' is a GLOBAL variable and shou
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET max_binlog_stmt_cache_size" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL max_binlog_stmt_cache_size=4096;
-SET max_binlog_stmt_cache_size=4096;
-ERROR HY000: Variable 'max_binlog_stmt_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION max_binlog_stmt_cache_size=4096;
-ERROR HY000: Variable 'max_binlog_stmt_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.max_binlog_stmt_cache_size=@global;
diff --git a/mysql-test/suite/sys_vars/r/max_connect_errors_grant.result b/mysql-test/suite/sys_vars/r/max_connect_errors_grant.result
index 527a4ecaf72..be145cb6139 100644
--- a/mysql-test/suite/sys_vars/r/max_connect_errors_grant.result
+++ b/mysql-test/suite/sys_vars/r/max_connect_errors_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.max_connect_errors;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET max_connect_errors" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET max_connect_errors" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL max_connect_errors=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET max_connect_errors=10;
ERROR HY000: Variable 'max_connect_errors' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION max_connect_errors=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'max_connect_errors' is a GLOBAL variable and should be se
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET max_connect_errors" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL max_connect_errors=10;
-SET max_connect_errors=10;
-ERROR HY000: Variable 'max_connect_errors' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION max_connect_errors=10;
-ERROR HY000: Variable 'max_connect_errors' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.max_connect_errors=@global;
diff --git a/mysql-test/suite/sys_vars/r/max_connections_grant.result b/mysql-test/suite/sys_vars/r/max_connections_grant.result
index e55904d4cc1..7cd6784733c 100644
--- a/mysql-test/suite/sys_vars/r/max_connections_grant.result
+++ b/mysql-test/suite/sys_vars/r/max_connections_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.max_connections;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET max_connections" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET max_connections" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL max_connections=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET max_connections=10;
ERROR HY000: Variable 'max_connections' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION max_connections=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'max_connections' is a GLOBAL variable and should be set w
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET max_connections" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL max_connections=10;
-SET max_connections=10;
-ERROR HY000: Variable 'max_connections' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION max_connections=10;
-ERROR HY000: Variable 'max_connections' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.max_connections=@global;
diff --git a/mysql-test/suite/sys_vars/r/max_join_size_basic.result b/mysql-test/suite/sys_vars/r/max_join_size_basic.result
index b87de2bc45e..784d0d0f274 100644
--- a/mysql-test/suite/sys_vars/r/max_join_size_basic.result
+++ b/mysql-test/suite/sys_vars/r/max_join_size_basic.result
@@ -21,25 +21,25 @@ select * from information_schema.session_variables where variable_name='max_join
VARIABLE_NAME VARIABLE_VALUE
MAX_JOIN_SIZE 18446744073709551615
set global max_join_size=10;
-set session max_join_size=20;
+set session max_join_size=100;
select @@global.max_join_size;
@@global.max_join_size
10
select @@session.max_join_size;
@@session.max_join_size
-20
+100
show global variables like 'max_join_size';
Variable_name Value
max_join_size 10
show session variables like 'max_join_size';
Variable_name Value
-max_join_size 20
+max_join_size 100
select * from information_schema.global_variables where variable_name='max_join_size';
VARIABLE_NAME VARIABLE_VALUE
MAX_JOIN_SIZE 10
select * from information_schema.session_variables where variable_name='max_join_size';
VARIABLE_NAME VARIABLE_VALUE
-MAX_JOIN_SIZE 20
+MAX_JOIN_SIZE 100
set global max_join_size=1.1;
ERROR 42000: Incorrect argument type to variable 'max_join_size'
set global max_join_size=1e1;
diff --git a/mysql-test/suite/sys_vars/r/max_join_size_func.result b/mysql-test/suite/sys_vars/r/max_join_size_func.result
index cacc918ea02..d46b89d1f44 100644
--- a/mysql-test/suite/sys_vars/r/max_join_size_func.result
+++ b/mysql-test/suite/sys_vars/r/max_join_size_func.result
@@ -39,19 +39,19 @@ id name id name
connect test_con1, localhost, root,,;
connection test_con1;
## Setting value of max_join_size ##
-SET @@session.max_join_size=8;
+SET @@session.max_join_size=4;
## Since total joins are more than max_join_size value so error will occur ##
SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
'#--------------------FN_DYNVARS_079_03-------------------------#'
## Setting global value of variable ##
-SET @@global.max_join_size=8;
+SET @@global.max_join_size=4;
connect test_con2, localhost, root,,;
connection test_con2;
## Verifying value of max_join_size ##
SELECT @@global.max_join_size;
@@global.max_join_size
-8
+4
## Since total joins are more than max_join_size value so error will occur ##
SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
diff --git a/mysql-test/suite/sys_vars/r/max_password_errors_grant.result b/mysql-test/suite/sys_vars/r/max_password_errors_grant.result
index 4ae0dfb887a..58817d3c4d6 100644
--- a/mysql-test/suite/sys_vars/r/max_password_errors_grant.result
+++ b/mysql-test/suite/sys_vars/r/max_password_errors_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.max_password_errors;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET max_password_errors" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET max_password_errors" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL max_password_errors=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET max_password_errors=10;
ERROR HY000: Variable 'max_password_errors' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION max_password_errors=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'max_password_errors' is a GLOBAL variable and should be s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET max_password_errors" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL max_password_errors=10;
-SET max_password_errors=10;
-ERROR HY000: Variable 'max_password_errors' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION max_password_errors=10;
-ERROR HY000: Variable 'max_password_errors' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.max_password_errors=@global;
diff --git a/mysql-test/suite/sys_vars/r/preudo_thread_id_grant.result b/mysql-test/suite/sys_vars/r/preudo_thread_id_grant.result
index d306a192727..8ac8e32d673 100644
--- a/mysql-test/suite/sys_vars/r/preudo_thread_id_grant.result
+++ b/mysql-test/suite/sys_vars/r/preudo_thread_id_grant.result
@@ -2,18 +2,18 @@
# MDEV-21975 Add BINLOG REPLAY privilege and bind new privileges to gtid_seq_no, preudo_thread_id, server_id, gtid_domain_id
#
SET @session=@@session.pseudo_thread_id;
-# Test that "SET pseudo_thread_id" is not allowed without BINLOG REPLAY or SUPER
+# Test that "SET pseudo_thread_id" is not allowed without BINLOG REPLAY
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG REPLAY, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL pseudo_thread_id=1;
ERROR HY000: Variable 'pseudo_thread_id' is a SESSION variable and can't be used with SET GLOBAL
SET pseudo_thread_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
SET SESSION pseudo_thread_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -29,16 +29,4 @@ SET SESSION pseudo_thread_id=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET pseudo_thread_id" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL pseudo_thread_id=1;
-ERROR HY000: Variable 'pseudo_thread_id' is a SESSION variable and can't be used with SET GLOBAL
-SET pseudo_thread_id=1;
-SET SESSION pseudo_thread_id=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@session.pseudo_thread_id=@session;
diff --git a/mysql-test/suite/sys_vars/r/proxy_protocol_networks_grant.result b/mysql-test/suite/sys_vars/r/proxy_protocol_networks_grant.result
index b6bae272443..90419a80a99 100644
--- a/mysql-test/suite/sys_vars/r/proxy_protocol_networks_grant.result
+++ b/mysql-test/suite/sys_vars/r/proxy_protocol_networks_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.proxy_protocol_networks;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET proxy_protocol_networks" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET proxy_protocol_networks" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL proxy_protocol_networks="";
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET proxy_protocol_networks="";
ERROR HY000: Variable 'proxy_protocol_networks' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION proxy_protocol_networks="";
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'proxy_protocol_networks' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET proxy_protocol_networks" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL proxy_protocol_networks="";
-SET proxy_protocol_networks="";
-ERROR HY000: Variable 'proxy_protocol_networks' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION proxy_protocol_networks="";
-ERROR HY000: Variable 'proxy_protocol_networks' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.proxy_protocol_networks=@global;
diff --git a/mysql-test/suite/sys_vars/r/read_binlog_speed_limit_grant.result b/mysql-test/suite/sys_vars/r/read_binlog_speed_limit_grant.result
index 03536c1c371..73a2127e33f 100644
--- a/mysql-test/suite/sys_vars/r/read_binlog_speed_limit_grant.result
+++ b/mysql-test/suite/sys_vars/r/read_binlog_speed_limit_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.read_binlog_speed_limit;
-# Test that "SET read_binlog_speed_limit" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET read_binlog_speed_limit" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL read_binlog_speed_limit=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET read_binlog_speed_limit=65536;
ERROR HY000: Variable 'read_binlog_speed_limit' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION read_binlog_speed_limit=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'read_binlog_speed_limit' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET read_binlog_speed_limit" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL read_binlog_speed_limit=65536;
-SET read_binlog_speed_limit=65536;
-ERROR HY000: Variable 'read_binlog_speed_limit' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION read_binlog_speed_limit=65536;
-ERROR HY000: Variable 'read_binlog_speed_limit' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.read_binlog_speed_limit=@global;
diff --git a/mysql-test/suite/sys_vars/r/read_only_grant.result b/mysql-test/suite/sys_vars/r/read_only_grant.result
index 455390eb4fc..fd908b0e52d 100644
--- a/mysql-test/suite/sys_vars/r/read_only_grant.result
+++ b/mysql-test/suite/sys_vars/r/read_only_grant.result
@@ -36,5 +36,5 @@ insert mysql.global_priv values ('bar', 'foo', '{"access":32768,"version_id":101
flush privileges;
show grants for foo@bar;
Grants for foo@bar
-GRANT SUPER, READ_ONLY ADMIN ON *.* TO `foo`@`bar`
+GRANT SUPER, BINLOG MONITOR, SET USER, FEDERATED ADMIN, CONNECTION ADMIN, READ_ONLY ADMIN, REPLICATION SLAVE ADMIN, REPLICATION MASTER ADMIN, BINLOG ADMIN, BINLOG REPLAY, SLAVE MONITOR ON *.* TO `foo`@`bar`
drop user foo@bar;
diff --git a/mysql-test/suite/sys_vars/r/relay_log_purge_grant.result b/mysql-test/suite/sys_vars/r/relay_log_purge_grant.result
index 0823bf1ff9d..b22b86f2b36 100644
--- a/mysql-test/suite/sys_vars/r/relay_log_purge_grant.result
+++ b/mysql-test/suite/sys_vars/r/relay_log_purge_grant.result
@@ -2,14 +2,14 @@
# MDEV-21969 Bind REPLICATION SLAVE ADMIN to relay_log_*, sync_master_info, sync_relay_log, sync_relay_log_info
#
SET @global=@@global.relay_log_purge;
-# Test that "SET relay_log_purge" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET relay_log_purge" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL relay_log_purge=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET relay_log_purge=1;
ERROR HY000: Variable 'relay_log_purge' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION relay_log_purge=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'relay_log_purge' is a GLOBAL variable and should be set w
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET relay_log_purge" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL relay_log_purge=1;
-SET relay_log_purge=1;
-ERROR HY000: Variable 'relay_log_purge' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION relay_log_purge=1;
-ERROR HY000: Variable 'relay_log_purge' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.relay_log_purge=@global;
diff --git a/mysql-test/suite/sys_vars/r/relay_log_recovery_grant.result b/mysql-test/suite/sys_vars/r/relay_log_recovery_grant.result
index 9e90c99ff0d..14e2ef0e3b9 100644
--- a/mysql-test/suite/sys_vars/r/relay_log_recovery_grant.result
+++ b/mysql-test/suite/sys_vars/r/relay_log_recovery_grant.result
@@ -2,14 +2,14 @@
# MDEV-21969 Bind REPLICATION SLAVE ADMIN to relay_log_*, sync_master_info, sync_relay_log, sync_relay_log_info
#
SET @global=@@global.relay_log_recovery;
-# Test that "SET relay_log_recovery" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET relay_log_recovery" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL relay_log_recovery=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET relay_log_recovery=1;
ERROR HY000: Variable 'relay_log_recovery' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION relay_log_recovery=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'relay_log_recovery' is a GLOBAL variable and should be se
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET relay_log_recovery" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL relay_log_recovery=1;
-SET relay_log_recovery=1;
-ERROR HY000: Variable 'relay_log_recovery' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION relay_log_recovery=1;
-ERROR HY000: Variable 'relay_log_recovery' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.relay_log_recovery=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_do_db_grant.result b/mysql-test/suite/sys_vars/r/replicate_do_db_grant.result
index 1f352590072..e5418dd627b 100644
--- a/mysql-test/suite/sys_vars/r/replicate_do_db_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_do_db_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_do_db;
-# Test that "SET replicate_do_db" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_do_db" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_do_db='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_do_db='';
ERROR HY000: Variable 'replicate_do_db' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_do_db='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_do_db' is a GLOBAL variable and should be set w
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_do_db" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_do_db='';
-SET replicate_do_db='';
-ERROR HY000: Variable 'replicate_do_db' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_do_db='';
-ERROR HY000: Variable 'replicate_do_db' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_do_db=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_do_table_grant.result b/mysql-test/suite/sys_vars/r/replicate_do_table_grant.result
index 31290979695..37b5450f677 100644
--- a/mysql-test/suite/sys_vars/r/replicate_do_table_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_do_table_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_do_table;
-# Test that "SET replicate_do_table" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_do_table" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_do_table='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_do_table='';
ERROR HY000: Variable 'replicate_do_table' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_do_table='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_do_table' is a GLOBAL variable and should be se
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_do_table" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_do_table='';
-SET replicate_do_table='';
-ERROR HY000: Variable 'replicate_do_table' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_do_table='';
-ERROR HY000: Variable 'replicate_do_table' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_do_table=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_events_marked_for_skip_grant.result b/mysql-test/suite/sys_vars/r/replicate_events_marked_for_skip_grant.result
index f649294f7e5..250be74b247 100644
--- a/mysql-test/suite/sys_vars/r/replicate_events_marked_for_skip_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_events_marked_for_skip_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_events_marked_for_skip;
-# Test that "SET replicate_events_marked_for_skip" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_events_marked_for_skip" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_events_marked_for_skip=REPLICATE;
ERROR HY000: Variable 'replicate_events_marked_for_skip' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_events_marked_for_skip=REPLICATE;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_events_marked_for_skip' is a GLOBAL variable an
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_events_marked_for_skip" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_events_marked_for_skip=REPLICATE;
-SET replicate_events_marked_for_skip=REPLICATE;
-ERROR HY000: Variable 'replicate_events_marked_for_skip' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_events_marked_for_skip=REPLICATE;
-ERROR HY000: Variable 'replicate_events_marked_for_skip' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_events_marked_for_skip=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_ignore_db_grant.result b/mysql-test/suite/sys_vars/r/replicate_ignore_db_grant.result
index 612fab62771..6086f363a9c 100644
--- a/mysql-test/suite/sys_vars/r/replicate_ignore_db_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_ignore_db_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_ignore_db;
-# Test that "SET replicate_ignore_db" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_ignore_db" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_ignore_db='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_ignore_db='';
ERROR HY000: Variable 'replicate_ignore_db' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_ignore_db='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_ignore_db' is a GLOBAL variable and should be s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_ignore_db" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_ignore_db='';
-SET replicate_ignore_db='';
-ERROR HY000: Variable 'replicate_ignore_db' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_ignore_db='';
-ERROR HY000: Variable 'replicate_ignore_db' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_ignore_db=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_ignore_table_grant.result b/mysql-test/suite/sys_vars/r/replicate_ignore_table_grant.result
index 9f2354a8704..b2ff338486e 100644
--- a/mysql-test/suite/sys_vars/r/replicate_ignore_table_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_ignore_table_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_ignore_table;
-# Test that "SET replicate_ignore_table" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_ignore_table" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_ignore_table='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_ignore_table='';
ERROR HY000: Variable 'replicate_ignore_table' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_ignore_table='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_ignore_table' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_ignore_table" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_ignore_table='';
-SET replicate_ignore_table='';
-ERROR HY000: Variable 'replicate_ignore_table' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_ignore_table='';
-ERROR HY000: Variable 'replicate_ignore_table' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_ignore_table=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_wild_do_table_grant.result b/mysql-test/suite/sys_vars/r/replicate_wild_do_table_grant.result
index 1c9e12ded65..c09ef6ecce1 100644
--- a/mysql-test/suite/sys_vars/r/replicate_wild_do_table_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_wild_do_table_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_wild_do_table;
-# Test that "SET replicate_wild_do_table" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_wild_do_table" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_wild_do_table='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_wild_do_table='';
ERROR HY000: Variable 'replicate_wild_do_table' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_wild_do_table='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_wild_do_table' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_wild_do_table" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_wild_do_table='';
-SET replicate_wild_do_table='';
-ERROR HY000: Variable 'replicate_wild_do_table' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_wild_do_table='';
-ERROR HY000: Variable 'replicate_wild_do_table' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_wild_do_table=@global;
diff --git a/mysql-test/suite/sys_vars/r/replicate_wild_ignore_table_grant.result b/mysql-test/suite/sys_vars/r/replicate_wild_ignore_table_grant.result
index 27a82173d50..2f5998e19d0 100644
--- a/mysql-test/suite/sys_vars/r/replicate_wild_ignore_table_grant.result
+++ b/mysql-test/suite/sys_vars/r/replicate_wild_ignore_table_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.replicate_wild_ignore_table;
-# Test that "SET replicate_wild_ignore_table" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET replicate_wild_ignore_table" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL replicate_wild_ignore_table='';
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET replicate_wild_ignore_table='';
ERROR HY000: Variable 'replicate_wild_ignore_table' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION replicate_wild_ignore_table='';
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'replicate_wild_ignore_table' is a GLOBAL variable and sho
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET replicate_wild_ignore_table" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL replicate_wild_ignore_table='';
-SET replicate_wild_ignore_table='';
-ERROR HY000: Variable 'replicate_wild_ignore_table' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION replicate_wild_ignore_table='';
-ERROR HY000: Variable 'replicate_wild_ignore_table' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.replicate_wild_ignore_table=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_enabled_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_enabled_grant.result
index 3c5b3d070d5..803a5551df2 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_enabled_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_enabled_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_master_enabled;
-# Test that "SET rpl_semi_sync_master_enabled" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET rpl_semi_sync_master_enabled" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_master_enabled=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET rpl_semi_sync_master_enabled=1;
ERROR HY000: Variable 'rpl_semi_sync_master_enabled' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_master_enabled=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_master_enabled' is a GLOBAL variable and sh
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_master_enabled" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_master_enabled=1;
-SET rpl_semi_sync_master_enabled=1;
-ERROR HY000: Variable 'rpl_semi_sync_master_enabled' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_master_enabled=1;
-ERROR HY000: Variable 'rpl_semi_sync_master_enabled' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_master_enabled=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_timeout_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_timeout_grant.result
index 2e8b51d5fa8..be12c0ff84d 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_timeout_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_timeout_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_master_timeout;
-# Test that "SET rpl_semi_sync_master_timeout" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET rpl_semi_sync_master_timeout" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_master_timeout=20000;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET rpl_semi_sync_master_timeout=20000;
ERROR HY000: Variable 'rpl_semi_sync_master_timeout' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_master_timeout=20000;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_master_timeout' is a GLOBAL variable and sh
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_master_timeout" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_master_timeout=20000;
-SET rpl_semi_sync_master_timeout=20000;
-ERROR HY000: Variable 'rpl_semi_sync_master_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_master_timeout=20000;
-ERROR HY000: Variable 'rpl_semi_sync_master_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_master_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_trace_level_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_trace_level_grant.result
index c31d063f1d9..2c5a7e65041 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_trace_level_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_trace_level_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_master_trace_level;
-# Test that "SET rpl_semi_sync_master_trace_level" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET rpl_semi_sync_master_trace_level" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_master_trace_level=64;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET rpl_semi_sync_master_trace_level=64;
ERROR HY000: Variable 'rpl_semi_sync_master_trace_level' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_master_trace_level=64;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_master_trace_level' is a GLOBAL variable an
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_master_trace_level" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_master_trace_level=64;
-SET rpl_semi_sync_master_trace_level=64;
-ERROR HY000: Variable 'rpl_semi_sync_master_trace_level' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_master_trace_level=64;
-ERROR HY000: Variable 'rpl_semi_sync_master_trace_level' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_master_trace_level=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_no_slave_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_no_slave_grant.result
index 14e239f7c4c..7af4016a6f6 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_no_slave_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_no_slave_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_master_wait_no_slave;
-# Test that "SET rpl_semi_sync_master_wait_no_slave" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET rpl_semi_sync_master_wait_no_slave" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_master_wait_no_slave=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET rpl_semi_sync_master_wait_no_slave=1;
ERROR HY000: Variable 'rpl_semi_sync_master_wait_no_slave' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_master_wait_no_slave=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_master_wait_no_slave' is a GLOBAL variable
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_master_wait_no_slave" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_master_wait_no_slave=1;
-SET rpl_semi_sync_master_wait_no_slave=1;
-ERROR HY000: Variable 'rpl_semi_sync_master_wait_no_slave' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_master_wait_no_slave=1;
-ERROR HY000: Variable 'rpl_semi_sync_master_wait_no_slave' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_master_wait_no_slave=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_point_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_point_grant.result
index 5760c45900e..0ce01cdb7be 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_point_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_master_wait_point_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_master_wait_point;
-# Test that "SET rpl_semi_sync_master_wait_point" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET rpl_semi_sync_master_wait_point" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_master_wait_point=AFTER_SYNC;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
SET rpl_semi_sync_master_wait_point=AFTER_SYNC;
ERROR HY000: Variable 'rpl_semi_sync_master_wait_point' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_master_wait_point=AFTER_SYNC;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_master_wait_point' is a GLOBAL variable and
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_master_wait_point" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_master_wait_point=AFTER_SYNC;
-SET rpl_semi_sync_master_wait_point=AFTER_SYNC;
-ERROR HY000: Variable 'rpl_semi_sync_master_wait_point' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_master_wait_point=AFTER_SYNC;
-ERROR HY000: Variable 'rpl_semi_sync_master_wait_point' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_master_wait_point=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_delay_master_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_delay_master_grant.result
index 9e1c3073da9..9460df95c3a 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_delay_master_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_delay_master_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_slave_delay_master;
-# Test that "SET rpl_semi_sync_slave_delay_master" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET rpl_semi_sync_slave_delay_master" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_slave_delay_master=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET rpl_semi_sync_slave_delay_master=1;
ERROR HY000: Variable 'rpl_semi_sync_slave_delay_master' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_slave_delay_master=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_slave_delay_master' is a GLOBAL variable an
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_slave_delay_master" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_slave_delay_master=1;
-SET rpl_semi_sync_slave_delay_master=1;
-ERROR HY000: Variable 'rpl_semi_sync_slave_delay_master' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_slave_delay_master=1;
-ERROR HY000: Variable 'rpl_semi_sync_slave_delay_master' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_slave_delay_master=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_enabled_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_enabled_grant.result
index f370b82d751..f99eee16122 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_enabled_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_enabled_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_slave_enabled;
-# Test that "SET rpl_semi_sync_slave_enabled" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET rpl_semi_sync_slave_enabled" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_slave_enabled=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET rpl_semi_sync_slave_enabled=1;
ERROR HY000: Variable 'rpl_semi_sync_slave_enabled' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_slave_enabled=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_slave_enabled' is a GLOBAL variable and sho
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_slave_enabled" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_slave_enabled=1;
-SET rpl_semi_sync_slave_enabled=1;
-ERROR HY000: Variable 'rpl_semi_sync_slave_enabled' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_slave_enabled=1;
-ERROR HY000: Variable 'rpl_semi_sync_slave_enabled' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_slave_enabled=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_kill_conn_timeout_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_kill_conn_timeout_grant.result
index f5e0f3b3e3a..42eda8058fa 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_kill_conn_timeout_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_kill_conn_timeout_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_slave_kill_conn_timeout;
-# Test that "SET rpl_semi_sync_slave_kill_conn_timeout" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET rpl_semi_sync_slave_kill_conn_timeout" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_slave_kill_conn_timeout=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET rpl_semi_sync_slave_kill_conn_timeout=1;
ERROR HY000: Variable 'rpl_semi_sync_slave_kill_conn_timeout' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_slave_kill_conn_timeout=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_slave_kill_conn_timeout' is a GLOBAL variab
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_slave_kill_conn_timeout" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_slave_kill_conn_timeout=1;
-SET rpl_semi_sync_slave_kill_conn_timeout=1;
-ERROR HY000: Variable 'rpl_semi_sync_slave_kill_conn_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_slave_kill_conn_timeout=1;
-ERROR HY000: Variable 'rpl_semi_sync_slave_kill_conn_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_slave_kill_conn_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_trace_level_grant.result b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_trace_level_grant.result
index a1471906bba..9693731dc08 100644
--- a/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_trace_level_grant.result
+++ b/mysql-test/suite/sys_vars/r/rpl_semi_sync_slave_trace_level_grant.result
@@ -2,14 +2,14 @@
# MDEV-21967 Bind REPLICATION {MASTER|SLAVE} ADMIN to rpl_semi_sync_* variables
#
SET @global=@@global.rpl_semi_sync_slave_trace_level;
-# Test that "SET rpl_semi_sync_slave_trace_level" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET rpl_semi_sync_slave_trace_level" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL rpl_semi_sync_slave_trace_level=64;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET rpl_semi_sync_slave_trace_level=64;
ERROR HY000: Variable 'rpl_semi_sync_slave_trace_level' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION rpl_semi_sync_slave_trace_level=64;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'rpl_semi_sync_slave_trace_level' is a GLOBAL variable and
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET rpl_semi_sync_slave_trace_level" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL rpl_semi_sync_slave_trace_level=64;
-SET rpl_semi_sync_slave_trace_level=64;
-ERROR HY000: Variable 'rpl_semi_sync_slave_trace_level' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION rpl_semi_sync_slave_trace_level=64;
-ERROR HY000: Variable 'rpl_semi_sync_slave_trace_level' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.rpl_semi_sync_slave_trace_level=@global;
diff --git a/mysql-test/suite/sys_vars/r/secure_auth_grant.result b/mysql-test/suite/sys_vars/r/secure_auth_grant.result
index ed33eb33951..69e001ad77d 100644
--- a/mysql-test/suite/sys_vars/r/secure_auth_grant.result
+++ b/mysql-test/suite/sys_vars/r/secure_auth_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.secure_auth;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET secure_auth" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET secure_auth" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL secure_auth=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET secure_auth=1;
ERROR HY000: Variable 'secure_auth' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION secure_auth=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'secure_auth' is a GLOBAL variable and should be set with
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET secure_auth" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL secure_auth=1;
-SET secure_auth=1;
-ERROR HY000: Variable 'secure_auth' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION secure_auth=1;
-ERROR HY000: Variable 'secure_auth' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.secure_auth=@global;
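secure_auth belongs to the MDEV-21961 group, where the bound privilege is CONNECTION ADMIN; a hedged sketch with a placeholder account:
# hypothetical account; CONNECTION ADMIN replaces SUPER for this variable
GRANT CONNECTION ADMIN ON *.* TO conn_admin@localhost;
# then, connected as conn_admin:
SET GLOBAL secure_auth=1;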
diff --git a/mysql-test/suite/sys_vars/r/secure_file_priv.result b/mysql-test/suite/sys_vars/r/secure_file_priv.result
index 74f816df59d..eeeb9a58c0f 100644
--- a/mysql-test/suite/sys_vars/r/secure_file_priv.result
+++ b/mysql-test/suite/sys_vars/r/secure_file_priv.result
@@ -6,8 +6,6 @@ INSERT INTO t1 VALUES ("one"),("two"),("three"),("four"),("five");
SHOW VARIABLES LIKE 'secure_file_priv';
Variable_name Value
secure_file_priv
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
c1
one
two
diff --git a/mysql-test/suite/sys_vars/r/server_id_grant.result b/mysql-test/suite/sys_vars/r/server_id_grant.result
index f562560e8da..444c1a56d5a 100644
--- a/mysql-test/suite/sys_vars/r/server_id_grant.result
+++ b/mysql-test/suite/sys_vars/r/server_id_grant.result
@@ -2,14 +2,14 @@
# MDEV-21975 Add BINLOG REPLAY privilege and bind new privileges to gtid_seq_no, pseudo_thread_id, server_id, gtid_domain_id
#
SET @global=@@global.server_id;
-# Test that "SET GLOBAL server_id" is not allowed without REPLICATION MASTER ADMIN or SUPER
+# Test that "SET GLOBAL server_id" is not allowed without REPLICATION MASTER ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION MASTER ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION MASTER ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL server_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION MASTER ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION MASTER ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -22,27 +22,18 @@ SET GLOBAL server_id=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET GLOBAL server_id" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL server_id=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.server_id=@global;
SET @session=@@session.server_id;
-# Test that "SET server_id" is not allowed without BINLOG REPLAY or SUPER
+# Test that "SET server_id" is not allowed without BINLOG REPLAY
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG REPLAY, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG REPLAY ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET server_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
SET SESSION server_id=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG REPLAY privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG REPLAY privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -56,14 +47,4 @@ SET SESSION server_id=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET server_id" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET server_id=1;
-SET SESSION server_id=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@session.server_id=@session;
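server_id is the split case in this result: the global scope is bound to REPLICATION MASTER ADMIN and the session scope to BINLOG REPLAY, so an account needs whichever privilege matches the scope it actually sets. Sketch with a placeholder account:
# hypothetical account covering both scopes
GRANT REPLICATION MASTER ADMIN, BINLOG REPLAY ON *.* TO rpl_user@localhost;
# then, connected as rpl_user:
SET GLOBAL server_id=1;
SET SESSION server_id=1;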
diff --git a/mysql-test/suite/sys_vars/r/slave_compressed_protocol_grant.result b/mysql-test/suite/sys_vars/r/slave_compressed_protocol_grant.result
index 2f3f52982ea..3ef5417d613 100644
--- a/mysql-test/suite/sys_vars/r/slave_compressed_protocol_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_compressed_protocol_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_compressed_protocol;
-# Test that "SET slave_compressed_protocol" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_compressed_protocol" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_compressed_protocol=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_compressed_protocol=1;
ERROR HY000: Variable 'slave_compressed_protocol' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_compressed_protocol=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_compressed_protocol' is a GLOBAL variable and shoul
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_compressed_protocol" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_compressed_protocol=1;
-SET slave_compressed_protocol=1;
-ERROR HY000: Variable 'slave_compressed_protocol' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_compressed_protocol=1;
-ERROR HY000: Variable 'slave_compressed_protocol' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_compressed_protocol=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_ddl_exec_mode_grant.result b/mysql-test/suite/sys_vars/r/slave_ddl_exec_mode_grant.result
index d4f21f1e0ea..dc05d6ced6b 100644
--- a/mysql-test/suite/sys_vars/r/slave_ddl_exec_mode_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_ddl_exec_mode_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_ddl_exec_mode;
-# Test that "SET slave_ddl_exec_mode" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_ddl_exec_mode" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_ddl_exec_mode=STRICT;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_ddl_exec_mode=STRICT;
ERROR HY000: Variable 'slave_ddl_exec_mode' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_ddl_exec_mode=STRICT;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_ddl_exec_mode' is a GLOBAL variable and should be s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_ddl_exec_mode" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_ddl_exec_mode=STRICT;
-SET slave_ddl_exec_mode=STRICT;
-ERROR HY000: Variable 'slave_ddl_exec_mode' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_ddl_exec_mode=STRICT;
-ERROR HY000: Variable 'slave_ddl_exec_mode' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_ddl_exec_mode=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_domain_parallel_threads_grant.result b/mysql-test/suite/sys_vars/r/slave_domain_parallel_threads_grant.result
index f31e48e276d..757b188a4d9 100644
--- a/mysql-test/suite/sys_vars/r/slave_domain_parallel_threads_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_domain_parallel_threads_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_domain_parallel_threads;
-# Test that "SET slave_domain_parallel_threads" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_domain_parallel_threads" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_domain_parallel_threads=0;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_domain_parallel_threads=0;
ERROR HY000: Variable 'slave_domain_parallel_threads' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_domain_parallel_threads=0;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_domain_parallel_threads' is a GLOBAL variable and s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_domain_parallel_threads" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_domain_parallel_threads=0;
-SET slave_domain_parallel_threads=0;
-ERROR HY000: Variable 'slave_domain_parallel_threads' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_domain_parallel_threads=0;
-ERROR HY000: Variable 'slave_domain_parallel_threads' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_domain_parallel_threads=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_exec_mode_grant.result b/mysql-test/suite/sys_vars/r/slave_exec_mode_grant.result
index c1cfdba7c2a..7fd5481ce4f 100644
--- a/mysql-test/suite/sys_vars/r/slave_exec_mode_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_exec_mode_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_exec_mode;
-# Test that "SET slave_exec_mode" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_exec_mode" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_exec_mode=STRICT;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_exec_mode=STRICT;
ERROR HY000: Variable 'slave_exec_mode' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_exec_mode=STRICT;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_exec_mode' is a GLOBAL variable and should be set w
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_exec_mode" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_exec_mode=STRICT;
-SET slave_exec_mode=STRICT;
-ERROR HY000: Variable 'slave_exec_mode' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_exec_mode=STRICT;
-ERROR HY000: Variable 'slave_exec_mode' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_exec_mode=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_max_allowed_packet_grant.result b/mysql-test/suite/sys_vars/r/slave_max_allowed_packet_grant.result
index 664d580e1a6..45e79aad0b9 100644
--- a/mysql-test/suite/sys_vars/r/slave_max_allowed_packet_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_max_allowed_packet_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_max_allowed_packet;
-# Test that "SET slave_max_allowed_packet" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_max_allowed_packet" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_max_allowed_packet=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_max_allowed_packet=65536;
ERROR HY000: Variable 'slave_max_allowed_packet' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_max_allowed_packet=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_max_allowed_packet' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_max_allowed_packet" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_max_allowed_packet=65536;
-SET slave_max_allowed_packet=65536;
-ERROR HY000: Variable 'slave_max_allowed_packet' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_max_allowed_packet=65536;
-ERROR HY000: Variable 'slave_max_allowed_packet' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_max_allowed_packet=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_net_timeout_grant.result b/mysql-test/suite/sys_vars/r/slave_net_timeout_grant.result
index 2b6664ab31f..da6e8b0475a 100644
--- a/mysql-test/suite/sys_vars/r/slave_net_timeout_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_net_timeout_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_net_timeout;
-# Test that "SET slave_net_timeout" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_net_timeout" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_net_timeout=60;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_net_timeout=60;
ERROR HY000: Variable 'slave_net_timeout' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_net_timeout=60;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_net_timeout' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_net_timeout" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_net_timeout=60;
-SET slave_net_timeout=60;
-ERROR HY000: Variable 'slave_net_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_net_timeout=60;
-ERROR HY000: Variable 'slave_net_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_net_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_parallel_max_queued_grant.result b/mysql-test/suite/sys_vars/r/slave_parallel_max_queued_grant.result
index 315de47853b..9594c7ce511 100644
--- a/mysql-test/suite/sys_vars/r/slave_parallel_max_queued_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_parallel_max_queued_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_parallel_max_queued;
-# Test that "SET slave_parallel_max_queued" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_parallel_max_queued" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_parallel_max_queued=65536;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_parallel_max_queued=65536;
ERROR HY000: Variable 'slave_parallel_max_queued' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_parallel_max_queued=65536;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_parallel_max_queued' is a GLOBAL variable and shoul
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_parallel_max_queued" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_parallel_max_queued=65536;
-SET slave_parallel_max_queued=65536;
-ERROR HY000: Variable 'slave_parallel_max_queued' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_parallel_max_queued=65536;
-ERROR HY000: Variable 'slave_parallel_max_queued' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_parallel_max_queued=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_parallel_mode_grant.result b/mysql-test/suite/sys_vars/r/slave_parallel_mode_grant.result
index 6d5da37da7a..1bb894f918a 100644
--- a/mysql-test/suite/sys_vars/r/slave_parallel_mode_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_parallel_mode_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_parallel_mode;
-# Test that "SET slave_parallel_mode" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_parallel_mode" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_parallel_mode=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_parallel_mode=1;
ERROR HY000: Variable 'slave_parallel_mode' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_parallel_mode=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_parallel_mode' is a GLOBAL variable and should be s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_parallel_mode" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_parallel_mode=1;
-SET slave_parallel_mode=1;
-ERROR HY000: Variable 'slave_parallel_mode' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_parallel_mode=1;
-ERROR HY000: Variable 'slave_parallel_mode' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_parallel_mode=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_parallel_threads_grant.result b/mysql-test/suite/sys_vars/r/slave_parallel_threads_grant.result
index 147252f78e3..a1dbcfc6ff6 100644
--- a/mysql-test/suite/sys_vars/r/slave_parallel_threads_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_parallel_threads_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_parallel_threads;
-# Test that "SET slave_parallel_threads" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_parallel_threads" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_parallel_threads=256;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_parallel_threads=256;
ERROR HY000: Variable 'slave_parallel_threads' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_parallel_threads=256;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_parallel_threads' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_parallel_threads" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_parallel_threads=256;
-SET slave_parallel_threads=256;
-ERROR HY000: Variable 'slave_parallel_threads' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_parallel_threads=256;
-ERROR HY000: Variable 'slave_parallel_threads' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_parallel_threads=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_parallel_workers_grant.result b/mysql-test/suite/sys_vars/r/slave_parallel_workers_grant.result
index 1985cc429e5..0de4af52b15 100644
--- a/mysql-test/suite/sys_vars/r/slave_parallel_workers_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_parallel_workers_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_parallel_workers;
-# Test that "SET slave_parallel_workers" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_parallel_workers" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_parallel_workers=256;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_parallel_workers=256;
ERROR HY000: Variable 'slave_parallel_workers' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_parallel_workers=256;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_parallel_workers' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_parallel_workers" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_parallel_workers=256;
-SET slave_parallel_workers=256;
-ERROR HY000: Variable 'slave_parallel_workers' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_parallel_workers=256;
-ERROR HY000: Variable 'slave_parallel_workers' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_parallel_workers=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_grant.result b/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_grant.result
index fd33c95f768..fea67a8063a 100644
--- a/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_run_triggers_for_rbr_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_run_triggers_for_rbr;
-# Test that "SET slave_run_triggers_for_rbr" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_run_triggers_for_rbr" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_run_triggers_for_rbr=YES;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_run_triggers_for_rbr=YES;
ERROR HY000: Variable 'slave_run_triggers_for_rbr' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_run_triggers_for_rbr=YES;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_run_triggers_for_rbr' is a GLOBAL variable and shou
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_run_triggers_for_rbr" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_run_triggers_for_rbr=YES;
-SET slave_run_triggers_for_rbr=YES;
-ERROR HY000: Variable 'slave_run_triggers_for_rbr' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_run_triggers_for_rbr=YES;
-ERROR HY000: Variable 'slave_run_triggers_for_rbr' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_run_triggers_for_rbr=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_sql_verify_checksum_grant.result b/mysql-test/suite/sys_vars/r/slave_sql_verify_checksum_grant.result
index 1cc4f3f3ef9..236b3a34793 100644
--- a/mysql-test/suite/sys_vars/r/slave_sql_verify_checksum_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_sql_verify_checksum_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_sql_verify_checksum;
-# Test that "SET slave_sql_verify_checksum" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_sql_verify_checksum" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_sql_verify_checksum=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_sql_verify_checksum=1;
ERROR HY000: Variable 'slave_sql_verify_checksum' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_sql_verify_checksum=1;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_sql_verify_checksum' is a GLOBAL variable and shoul
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_sql_verify_checksum" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_sql_verify_checksum=1;
-SET slave_sql_verify_checksum=1;
-ERROR HY000: Variable 'slave_sql_verify_checksum' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_sql_verify_checksum=1;
-ERROR HY000: Variable 'slave_sql_verify_checksum' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_sql_verify_checksum=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_transaction_retry_interval_grant.result b/mysql-test/suite/sys_vars/r/slave_transaction_retry_interval_grant.result
index e2e5e3963b5..ac144e62e5c 100644
--- a/mysql-test/suite/sys_vars/r/slave_transaction_retry_interval_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_transaction_retry_interval_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_transaction_retry_interval;
-# Test that "SET slave_transaction_retry_interval" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_transaction_retry_interval" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_transaction_retry_interval=256;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_transaction_retry_interval=256;
ERROR HY000: Variable 'slave_transaction_retry_interval' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_transaction_retry_interval=256;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_transaction_retry_interval' is a GLOBAL variable an
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_transaction_retry_interval" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_transaction_retry_interval=256;
-SET slave_transaction_retry_interval=256;
-ERROR HY000: Variable 'slave_transaction_retry_interval' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_transaction_retry_interval=256;
-ERROR HY000: Variable 'slave_transaction_retry_interval' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_transaction_retry_interval=@global;
diff --git a/mysql-test/suite/sys_vars/r/slave_type_conversions_grant.result b/mysql-test/suite/sys_vars/r/slave_type_conversions_grant.result
index 01022e73dcb..5057f190fc1 100644
--- a/mysql-test/suite/sys_vars/r/slave_type_conversions_grant.result
+++ b/mysql-test/suite/sys_vars/r/slave_type_conversions_grant.result
@@ -2,14 +2,14 @@
# MDEV-21966 Bind REPLICATION SLAVE ADMIN to a number of global system variables
#
SET @global=@@global.slave_type_conversions;
-# Test that "SET slave_type_conversions" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET slave_type_conversions" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slave_type_conversions=ALL_NON_LOSSY;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET slave_type_conversions=ALL_NON_LOSSY;
ERROR HY000: Variable 'slave_type_conversions' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slave_type_conversions=ALL_NON_LOSSY;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slave_type_conversions' is a GLOBAL variable and should b
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slave_type_conversions" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slave_type_conversions=ALL_NON_LOSSY;
-SET slave_type_conversions=ALL_NON_LOSSY;
-ERROR HY000: Variable 'slave_type_conversions' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slave_type_conversions=ALL_NON_LOSSY;
-ERROR HY000: Variable 'slave_type_conversions' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slave_type_conversions=@global;
diff --git a/mysql-test/suite/sys_vars/r/slow_launch_time_grant.result b/mysql-test/suite/sys_vars/r/slow_launch_time_grant.result
index 45aef07d473..52435ae3686 100644
--- a/mysql-test/suite/sys_vars/r/slow_launch_time_grant.result
+++ b/mysql-test/suite/sys_vars/r/slow_launch_time_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.slow_launch_time;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET slow_launch_time" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET slow_launch_time" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL slow_launch_time=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET slow_launch_time=10;
ERROR HY000: Variable 'slow_launch_time' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION slow_launch_time=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'slow_launch_time' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET slow_launch_time" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL slow_launch_time=10;
-SET slow_launch_time=10;
-ERROR HY000: Variable 'slow_launch_time' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION slow_launch_time=10;
-ERROR HY000: Variable 'slow_launch_time' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.slow_launch_time=@global;
diff --git a/mysql-test/suite/sys_vars/r/sql_big_selects_func.result b/mysql-test/suite/sys_vars/r/sql_big_selects_func.result
index 609401c771c..104103f681e 100644
--- a/mysql-test/suite/sys_vars/r/sql_big_selects_func.result
+++ b/mysql-test/suite/sys_vars/r/sql_big_selects_func.result
@@ -3,7 +3,7 @@
SET @session_sql_big_selects = @@SESSION.sql_big_selects;
SET @session_max_join_size = @@SESSION.max_join_size;
SET @global_max_join_size = @@GLOBAL.max_join_size;
-SET MAX_JOIN_SIZE=9;
+SET MAX_JOIN_SIZE=21;
CREATE TEMPORARY TABLE t1(a varchar(20) not null, b varchar(20));
CREATE TEMPORARY TABLE t2(a varchar(20) null, b varchar(20));
INSERT INTO t1 VALUES('aa','bb');
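The only change here is the larger MAX_JOIN_SIZE, presumably tracking revised row estimates; the mechanism under test is unchanged: a join whose estimated examined rows exceeds max_join_size is rejected unless sql_big_selects is enabled. An illustrative sketch (not the test itself):
SET SESSION max_join_size=1;
SELECT * FROM t1, t2;
# rejected with ER_TOO_BIG_SELECT
SET SESSION sql_big_selects=1;
SELECT * FROM t1, t2;
# permitted again; max_join_size is no longer enforced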
diff --git a/mysql-test/suite/sys_vars/r/sql_log_bin_grant.result b/mysql-test/suite/sys_vars/r/sql_log_bin_grant.result
index 574c53e1f0a..336b2943f71 100644
--- a/mysql-test/suite/sys_vars/r/sql_log_bin_grant.result
+++ b/mysql-test/suite/sys_vars/r/sql_log_bin_grant.result
@@ -1,18 +1,18 @@
#
#
#
-# Test that "SET sql_log_bin" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET sql_log_bin" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET sql_log_bin=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET GLOBAL sql_log_bin=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET SESSION sql_log_bin=1;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
disconnect user1;
connection default;
DROP USER user1@localhost;
@@ -28,15 +28,3 @@ SET SESSION sql_log_bin=1;
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET sql_log_bin" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET sql_log_bin=1;
-SET GLOBAL sql_log_bin=1;
-ERROR HY000: Variable 'sql_log_bin' is a SESSION variable
-SET SESSION sql_log_bin=1;
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
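sql_log_bin is the session-only case: BINLOG ADMIN is now the sole privilege that allows toggling it, and SET GLOBAL remains invalid for this variable. Hedged sketch with a placeholder account:
# hypothetical account; typical use is disabling binary logging for one session
GRANT BINLOG ADMIN ON *.* TO binlog_admin@localhost;
# then, connected as binlog_admin:
SET SESSION sql_log_bin=0;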
diff --git a/mysql-test/suite/sys_vars/r/sync_binlog_grant.result b/mysql-test/suite/sys_vars/r/sync_binlog_grant.result
index 1fcdf8b000e..b66ba1fb946 100644
--- a/mysql-test/suite/sys_vars/r/sync_binlog_grant.result
+++ b/mysql-test/suite/sys_vars/r/sync_binlog_grant.result
@@ -2,14 +2,14 @@
# MDEV-21963 Bind BINLOG ADMIN to a number of global system variables
#
SET @global=@@global.sync_binlog;
-# Test that "SET sync_binlog" is not allowed without BINLOG ADMIN or SUPER
+# Test that "SET sync_binlog" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL sync_binlog=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, BINLOG ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the BINLOG ADMIN privilege(s) for this operation
SET sync_binlog=10;
ERROR HY000: Variable 'sync_binlog' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION sync_binlog=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'sync_binlog' is a GLOBAL variable and should be set with
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET sync_binlog" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL sync_binlog=10;
-SET sync_binlog=10;
-ERROR HY000: Variable 'sync_binlog' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION sync_binlog=10;
-ERROR HY000: Variable 'sync_binlog' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.sync_binlog=@global;
diff --git a/mysql-test/suite/sys_vars/r/sync_master_info_grant.result b/mysql-test/suite/sys_vars/r/sync_master_info_grant.result
index 40b04ea4480..6a81685d3cb 100644
--- a/mysql-test/suite/sys_vars/r/sync_master_info_grant.result
+++ b/mysql-test/suite/sys_vars/r/sync_master_info_grant.result
@@ -2,14 +2,14 @@
# MDEV-21969 Bind REPLICATION SLAVE ADMIN to relay_log_*, sync_master_info, sync_relay_log, sync_relay_log_info
#
SET @global=@@global.sync_master_info;
-# Test that "SET sync_master_info" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET sync_master_info" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL sync_master_info=20000;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET sync_master_info=20000;
ERROR HY000: Variable 'sync_master_info' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION sync_master_info=20000;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'sync_master_info' is a GLOBAL variable and should be set
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET sync_master_info" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL sync_master_info=20000;
-SET sync_master_info=20000;
-ERROR HY000: Variable 'sync_master_info' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION sync_master_info=20000;
-ERROR HY000: Variable 'sync_master_info' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.sync_master_info=@global;
diff --git a/mysql-test/suite/sys_vars/r/sync_relay_log_grant.result b/mysql-test/suite/sys_vars/r/sync_relay_log_grant.result
index f71e8325858..881a7b1fa11 100644
--- a/mysql-test/suite/sys_vars/r/sync_relay_log_grant.result
+++ b/mysql-test/suite/sys_vars/r/sync_relay_log_grant.result
@@ -2,14 +2,14 @@
# MDEV-21969 Bind REPLICATION SLAVE ADMIN to relay_log_*, sync_master_info, sync_relay_log, sync_relay_log_info
#
SET @global=@@global.sync_relay_log;
-# Test that "SET sync_relay_log" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET sync_relay_log" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL sync_relay_log=20000;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET sync_relay_log=20000;
ERROR HY000: Variable 'sync_relay_log' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION sync_relay_log=20000;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'sync_relay_log' is a GLOBAL variable and should be set wi
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET sync_relay_log" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL sync_relay_log=20000;
-SET sync_relay_log=20000;
-ERROR HY000: Variable 'sync_relay_log' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION sync_relay_log=20000;
-ERROR HY000: Variable 'sync_relay_log' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.sync_relay_log=@global;
diff --git a/mysql-test/suite/sys_vars/r/sync_relay_log_info_grant.result b/mysql-test/suite/sys_vars/r/sync_relay_log_info_grant.result
index 25727aac07c..18eae3fa932 100644
--- a/mysql-test/suite/sys_vars/r/sync_relay_log_info_grant.result
+++ b/mysql-test/suite/sys_vars/r/sync_relay_log_info_grant.result
@@ -2,14 +2,14 @@
# MDEV-21969 Bind REPLICATION SLAVE ADMIN to relay_log_*, sync_master_info, sync_relay_log, sync_relay_log_info
#
SET @global=@@global.sync_relay_log_info;
-# Test that "SET sync_relay_log_info" is not allowed without REPLICATION SLAVE ADMIN or SUPER
+# Test that "SET sync_relay_log_info" is not allowed without REPLICATION SLAVE ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE REPLICATION SLAVE ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE REPLICATION SLAVE ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL sync_relay_log_info=20000;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, REPLICATION SLAVE ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE ADMIN privilege(s) for this operation
SET sync_relay_log_info=20000;
ERROR HY000: Variable 'sync_relay_log_info' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION sync_relay_log_info=20000;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'sync_relay_log_info' is a GLOBAL variable and should be s
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET sync_relay_log_info" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL sync_relay_log_info=20000;
-SET sync_relay_log_info=20000;
-ERROR HY000: Variable 'sync_relay_log_info' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION sync_relay_log_info=20000;
-ERROR HY000: Variable 'sync_relay_log_info' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.sync_relay_log_info=@global;
diff --git a/mysql-test/suite/sys_vars/r/sysvars_aria,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_aria,32bit.rdiff
index f0cbbd874ee..3ebce38219c 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_aria,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_aria,32bit.rdiff
@@ -1,5 +1,5 @@
---- suite/sys_vars/r/sysvars_aria.result 2021-02-02 02:58:55.686921205 +0200
-+++ suite/sys_vars/r/sysvars_aria,32bit.reject 2021-02-02 10:55:53.876791633 +0200
+--- suite/sys_vars/r/sysvars_aria.result
++++ suite/sys_vars/r/sysvars_aria,32bit.reject
@@ -5,7 +5,7 @@
SESSION_VALUE NULL
DEFAULT_VALUE 8192
diff --git a/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff
index 8adb294db00..cb8338d4e9b 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff
@@ -1,5 +1,5 @@
---- r\sysvars_debug.result 2017-08-08 10:52:39.036804900 +0300
-+++ r\sysvars_debug,32bit.reject 2017-09-10 08:06:38.447122100 +0300
+--- r\sysvars_debug.result
++++ r\sysvars_debug,32bit.reject
@@ -21,7 +21,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 0
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index 6eface8c097..79341b85822 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -223,54 +223,6 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
-VARIABLE_NAME INNODB_CHANGE_BUFFERING
-SESSION_VALUE NULL
-DEFAULT_VALUE none
-VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE ENUM
-VARIABLE_COMMENT Buffer changes to secondary indexes.
-NUMERIC_MIN_VALUE NULL
-NUMERIC_MAX_VALUE NULL
-NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST none,inserts,deletes,changes,purges,all
-READ_ONLY NO
-COMMAND_LINE_ARGUMENT REQUIRED
-VARIABLE_NAME INNODB_CHANGE_BUFFERING_DEBUG
-SESSION_VALUE NULL
-DEFAULT_VALUE 0
-VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE INT UNSIGNED
-VARIABLE_COMMENT Debug flags for InnoDB change buffering (0=none, 1=try to buffer)
-NUMERIC_MIN_VALUE 0
-NUMERIC_MAX_VALUE 1
-NUMERIC_BLOCK_SIZE 0
-ENUM_VALUE_LIST NULL
-READ_ONLY NO
-COMMAND_LINE_ARGUMENT REQUIRED
-VARIABLE_NAME INNODB_CHANGE_BUFFER_DUMP
-SESSION_VALUE NULL
-DEFAULT_VALUE OFF
-VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BOOLEAN
-VARIABLE_COMMENT Dump the change buffer at startup.
-NUMERIC_MIN_VALUE NULL
-NUMERIC_MAX_VALUE NULL
-NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST OFF,ON
-READ_ONLY YES
-COMMAND_LINE_ARGUMENT NONE
-VARIABLE_NAME INNODB_CHANGE_BUFFER_MAX_SIZE
-SESSION_VALUE NULL
-DEFAULT_VALUE 25
-VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE INT UNSIGNED
-VARIABLE_COMMENT Maximum on-disk size of change buffer in terms of percentage of the buffer pool.
-NUMERIC_MIN_VALUE 0
-NUMERIC_MAX_VALUE 50
-NUMERIC_BLOCK_SIZE 0
-ENUM_VALUE_LIST NULL
-READ_ONLY NO
-COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_CHECKSUM_ALGORITHM
SESSION_VALUE NULL
DEFAULT_VALUE full_crc32
@@ -355,6 +307,18 @@ NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME INNODB_DATA_FILE_BUFFERING
+SESSION_VALUE NULL
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Whether the file system cache for data files is enabled
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_DATA_FILE_PATH
SESSION_VALUE NULL
DEFAULT_VALUE ibdata1:12M:autoextend
@@ -379,6 +343,18 @@ NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME INNODB_DATA_FILE_WRITE_THROUGH
+SESSION_VALUE NULL
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Whether each write to data files writes through
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_DATA_HOME_DIR
SESSION_VALUE NULL
DEFAULT_VALUE
@@ -1015,6 +991,18 @@ NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME INNODB_LOG_FILE_WRITE_THROUGH
+SESSION_VALUE NULL
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Whether each write to ib_logfile0 is write through
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_LOG_GROUP_HOME_DIR
SESSION_VALUE NULL
DEFAULT_VALUE
@@ -1665,7 +1653,7 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_UNDO_TABLESPACES
SESSION_VALUE NULL
-DEFAULT_VALUE 0
+DEFAULT_VALUE 3
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Number of undo tablespaces to use.
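Besides the context rows shown, this result file records three related changes: the four INNODB_CHANGE_BUFFER* variables disappear (the change buffer was removed), new innodb_data_file_buffering, innodb_data_file_write_through and innodb_log_file_write_through switches appear, and the default number of undo tablespaces rises from 0 to 3. A hedged way to confirm the same on a running server built from this tree (not part of the test itself):

    -- Expect an empty result: the change-buffer variables no longer exist.
    SELECT variable_name FROM information_schema.system_variables
     WHERE variable_name LIKE 'innodb_change_buffer%';
    -- Expect the new buffering/write-through switches, defaulting to OFF.
    SELECT variable_name, default_value
      FROM information_schema.system_variables
     WHERE variable_name IN ('innodb_data_file_buffering',
                             'innodb_data_file_write_through',
                             'innodb_log_file_write_through');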
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
index f896421559c..b9a333afeae 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
@@ -1665,7 +1665,7 @@ COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME LOG_SLOW_ADMIN_STATEMENTS
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BOOLEAN
-VARIABLE_COMMENT Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to the slow log if it is open. Resets or sets the option 'admin' in log_slow_disabled_statements
+VARIABLE_COMMENT Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to the slow log if it is open. Resets or sets the option 'admin' in log_slow_filter. Deprecated, use log_slow_filter without 'admin'.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
@@ -2292,6 +2292,26 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_DISK_READ_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of reading a block of IO_SIZE (4096) from a disk (in usec).
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 10000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_DISK_READ_RATIO
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Chance that we have to do a disk read to find a row or index entry from the engine cache (cache_misses/total_cache_requests). 0.0 means that everything is cached and 1.0 means that nothing is expected to be in the engine cache.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_EXTRA_PRUNING_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2302,6 +2322,56 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_INDEX_BLOCK_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a key block from the cache to internal storage as part of an index scan.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COMPARE_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking a key against the end key condition.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key in the engine and copying it to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_LOOKUP_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost for finding a key based on a key value
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_NEXT_FIND_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key and rowid when using filters.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2322,6 +2392,66 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROWID_COMPARE_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of comparing two rowids
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROWID_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a rowid
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a row from the engine or the join cache to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_LOOKUP_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding a row based on a rowid or a clustered key.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_NEXT_FIND_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next row when scanning the table.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_SCAN_SETUP_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Extra cost added to TABLE and INDEX scans to get the optimizer to prefer index lookups.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 100000000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SEARCH_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2382,6 +2512,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_WHERE_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking the row against the WHERE clause. Increasing this will cause the optimizer to prefer plans with fewer row combinations.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 100000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
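The added OPTIMIZER_*_COST and OPTIMIZER_DISK_READ_RATIO rows describe the engine-independent cost model documented in Docs/optimizer_costs.txt from the same change set. A hedged sketch of how the new variables can be inspected and adjusted; the values below are purely illustrative, not recommendations:

    -- The cost variables are doubles; disk_read_cost is microseconds per
    -- IO_SIZE (4096-byte) block, disk_read_ratio is a cache-miss probability.
    SELECT variable_name, global_value
      FROM information_schema.system_variables
     WHERE variable_name LIKE 'optimizer\_%\_cost'
        OR variable_name = 'optimizer_disk_read_ratio';
    -- Example: tell the optimizer the working set is fully cached.
    SET GLOBAL optimizer_disk_read_ratio = 0.0;
    -- optimizer_where_cost and optimizer_scan_setup_cost are session-scoped.
    SET SESSION optimizer_where_cost = DEFAULT;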
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
index 23942418b07..075c1ba959a 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
@@ -1815,7 +1815,7 @@ COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME LOG_SLOW_ADMIN_STATEMENTS
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BOOLEAN
-VARIABLE_COMMENT Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to the slow log if it is open. Resets or sets the option 'admin' in log_slow_disabled_statements
+VARIABLE_COMMENT Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements to the slow log if it is open. Resets or sets the option 'admin' in log_slow_filter. Deprecated, use log_slow_filter without 'admin'.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
@@ -2462,6 +2462,26 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_DISK_READ_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of reading a block of IO_SIZE (4096) from a disk (in usec).
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 10000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_DISK_READ_RATIO
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Chance that we have to do a disk read to find a row or index entry from the engine cache (cache_misses/total_cache_requests). 0.0 means that everything is cached and 1.0 means that nothing is expected to be in the engine cache.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_EXTRA_PRUNING_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2472,6 +2492,56 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_INDEX_BLOCK_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a key block from the cache to internal storage as part of an index scan.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COMPARE_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking a key against the end key condition.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key in the engine and copying it to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_LOOKUP_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost for finding a key based on a key value
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_KEY_NEXT_FIND_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next key and rowid when using filters.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_MAX_SEL_ARG_WEIGHT
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2492,6 +2562,66 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROWID_COMPARE_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of comparing two rowids
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROWID_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a rowid
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_COPY_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of copying a row from the engine or the join cache to the SQL layer.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_LOOKUP_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding a row based on a rowid or a clustered key.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_ROW_NEXT_FIND_COST
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of finding the next row when scanning the table.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 1000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_SCAN_SETUP_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Extra cost added to TABLE and INDEX scans to get the optimizer to prefer index lookups.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 100000000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SEARCH_DEPTH
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
@@ -2552,6 +2682,16 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_WHERE_COST
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Cost of checking the row against the WHERE clause. Increasing this will cause the optimizer to prefer plans with fewer row combinations.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 100000
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
diff --git a/mysql-test/suite/sys_vars/r/sysvars_star.result b/mysql-test/suite/sys_vars/r/sysvars_star.result
index b80515db23d..b3357fda3af 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_star.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_star.result
@@ -60,7 +60,7 @@ VARIABLE_NAME PLUGIN_MATURITY
SESSION_VALUE NULL
GLOBAL_VALUE alpha
GLOBAL_VALUE_ORIGIN CONFIG
-DEFAULT_VALUE gamma
+DEFAULT_VALUE beta
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE ENUM
VARIABLE_COMMENT The lowest desirable plugin maturity. Plugins less mature than that will not be installed or loaded
diff --git a/mysql-test/suite/sys_vars/r/sysvars_wsrep,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_wsrep,32bit.rdiff
index 016bd016f29..73b788057cb 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_wsrep,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_wsrep,32bit.rdiff
@@ -1,5 +1,5 @@
---- suite/sys_vars/r/sysvars_wsrep.result 2014-10-10 13:33:55.000000000 +0300
-+++ suite/sys_vars/r/sysvars_wsrep,32bit.reject 2014-10-10 19:38:09.000000000 +0300
+--- suite/sys_vars/r/sysvars_wsrep.result
++++ suite/sys_vars/r/sysvars_wsrep,32bit.reject
@@ -245,7 +245,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 131072
diff --git a/mysql-test/suite/sys_vars/r/thread_pool_idle_timeout_grant.result b/mysql-test/suite/sys_vars/r/thread_pool_idle_timeout_grant.result
index f9e14eaff0e..a9f3021d6ed 100644
--- a/mysql-test/suite/sys_vars/r/thread_pool_idle_timeout_grant.result
+++ b/mysql-test/suite/sys_vars/r/thread_pool_idle_timeout_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.thread_pool_idle_timeout;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET thread_pool_idle_timeout" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET thread_pool_idle_timeout" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL thread_pool_idle_timeout=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET thread_pool_idle_timeout=10;
ERROR HY000: Variable 'thread_pool_idle_timeout' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION thread_pool_idle_timeout=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'thread_pool_idle_timeout' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET thread_pool_idle_timeout" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL thread_pool_idle_timeout=10;
-SET thread_pool_idle_timeout=10;
-ERROR HY000: Variable 'thread_pool_idle_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION thread_pool_idle_timeout=10;
-ERROR HY000: Variable 'thread_pool_idle_timeout' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.thread_pool_idle_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/r/thread_pool_max_threads_grant.result b/mysql-test/suite/sys_vars/r/thread_pool_max_threads_grant.result
index 17511766787..40ea57a75c2 100644
--- a/mysql-test/suite/sys_vars/r/thread_pool_max_threads_grant.result
+++ b/mysql-test/suite/sys_vars/r/thread_pool_max_threads_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.thread_pool_max_threads;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET thread_pool_max_threads" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET thread_pool_max_threads" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL thread_pool_max_threads=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET thread_pool_max_threads=10;
ERROR HY000: Variable 'thread_pool_max_threads' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION thread_pool_max_threads=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'thread_pool_max_threads' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET thread_pool_max_threads" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL thread_pool_max_threads=10;
-SET thread_pool_max_threads=10;
-ERROR HY000: Variable 'thread_pool_max_threads' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION thread_pool_max_threads=10;
-ERROR HY000: Variable 'thread_pool_max_threads' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.thread_pool_max_threads=@global;
diff --git a/mysql-test/suite/sys_vars/r/thread_pool_oversubscribe_grant.result b/mysql-test/suite/sys_vars/r/thread_pool_oversubscribe_grant.result
index 965be157f02..ddb5cc01126 100644
--- a/mysql-test/suite/sys_vars/r/thread_pool_oversubscribe_grant.result
+++ b/mysql-test/suite/sys_vars/r/thread_pool_oversubscribe_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.thread_pool_oversubscribe;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET thread_pool_oversubscribe" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET thread_pool_oversubscribe" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL thread_pool_oversubscribe=10;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET thread_pool_oversubscribe=10;
ERROR HY000: Variable 'thread_pool_oversubscribe' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION thread_pool_oversubscribe=10;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'thread_pool_oversubscribe' is a GLOBAL variable and shoul
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET thread_pool_oversubscribe" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL thread_pool_oversubscribe=10;
-SET thread_pool_oversubscribe=10;
-ERROR HY000: Variable 'thread_pool_oversubscribe' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION thread_pool_oversubscribe=10;
-ERROR HY000: Variable 'thread_pool_oversubscribe' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.thread_pool_oversubscribe=@global;
diff --git a/mysql-test/suite/sys_vars/r/thread_pool_stall_limit_grant.result b/mysql-test/suite/sys_vars/r/thread_pool_stall_limit_grant.result
index 08462dc4733..335f60ea888 100644
--- a/mysql-test/suite/sys_vars/r/thread_pool_stall_limit_grant.result
+++ b/mysql-test/suite/sys_vars/r/thread_pool_stall_limit_grant.result
@@ -2,14 +2,14 @@ SET @global=@@global.thread_pool_stall_limit;
#
# MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
#
-# Test that "SET thread_pool_stall_limit" is not allowed without CONNECTION ADMIN or SUPER
+# Test that "SET thread_pool_stall_limit" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
connect user1,localhost,user1,,;
connection user1;
SET GLOBAL thread_pool_stall_limit=400;
-ERROR 42000: Access denied; you need (at least one of) the SUPER, CONNECTION ADMIN privilege(s) for this operation
+ERROR 42000: Access denied; you need (at least one of) the CONNECTION ADMIN privilege(s) for this operation
SET thread_pool_stall_limit=400;
ERROR HY000: Variable 'thread_pool_stall_limit' is a GLOBAL variable and should be set with SET GLOBAL
SET SESSION thread_pool_stall_limit=400;
@@ -30,17 +30,4 @@ ERROR HY000: Variable 'thread_pool_stall_limit' is a GLOBAL variable and should
disconnect user1;
connection default;
DROP USER user1@localhost;
-# Test that "SET thread_pool_stall_limit" is allowed with SUPER
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
-connect user1,localhost,user1,,;
-connection user1;
-SET GLOBAL thread_pool_stall_limit=400;
-SET thread_pool_stall_limit=400;
-ERROR HY000: Variable 'thread_pool_stall_limit' is a GLOBAL variable and should be set with SET GLOBAL
-SET SESSION thread_pool_stall_limit=400;
-ERROR HY000: Variable 'thread_pool_stall_limit' is a GLOBAL variable and should be set with SET GLOBAL
-disconnect user1;
-connection default;
-DROP USER user1@localhost;
SET @@global.thread_pool_stall_limit=@global;
diff --git a/mysql-test/suite/sys_vars/t/binlog_direct_non_transactional_updates_grant.test b/mysql-test/suite/sys_vars/t/binlog_direct_non_transactional_updates_grant.test
index 7de1a119308..98970a4314b 100644
--- a/mysql-test/suite/sys_vars/t/binlog_direct_non_transactional_updates_grant.test
+++ b/mysql-test/suite/sys_vars/t/binlog_direct_non_transactional_updates_grant.test
@@ -3,16 +3,15 @@ source include/have_log_bin.inc;
SET @global= @@global.binlog_direct_non_transactional_updates;
SET @session= @@global.binlog_direct_non_transactional_updates;
-
--echo #
--echo #
--echo #
---echo # Test that "SET binlog_direct_non_transactional_updates" is not allowed without BINLOG ADMIN or SUPER
+--echo # Test that "SET binlog_direct_non_transactional_updates" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +37,5 @@ SET SESSION binlog_direct_non_transactional_updates=0;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET binlog_direct_non_transactional_updates" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET binlog_direct_non_transactional_updates=0;
-SET GLOBAL binlog_direct_non_transactional_updates=0;
-SET SESSION binlog_direct_non_transactional_updates=0;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
-
SET GLOBAL binlog_direct_non_transactional_updates=@global;
SET SESSION binlog_direct_non_transactional_updates=@session;
diff --git a/mysql-test/suite/sys_vars/t/binlog_format_grant.test b/mysql-test/suite/sys_vars/t/binlog_format_grant.test
index 6f89c75a79e..6f834db4d51 100644
--- a/mysql-test/suite/sys_vars/t/binlog_format_grant.test
+++ b/mysql-test/suite/sys_vars/t/binlog_format_grant.test
@@ -1,15 +1,14 @@
source include/have_log_bin.inc;
-
--echo #
--echo #
--echo #
---echo # Test that "SET binlog_format" is not allowed without BINLOG ADMIN or SUPER
+--echo # Test that "SET binlog_format" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -34,16 +33,3 @@ SET SESSION binlog_format=mixed;
--disconnect user1
--connection default
DROP USER user1@localhost;
-
---echo # Test that "SET binlog_format" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET binlog_format=mixed;
-SET GLOBAL binlog_format=mixed;
-SET SESSION binlog_format=mixed;
---disconnect user1
---connection default
-DROP USER user1@localhost;
diff --git a/mysql-test/suite/sys_vars/t/connect_timeout_grant.test b/mysql-test/suite/sys_vars/t/connect_timeout_grant.test
index 61a7a3eb530..fa90b79eb1c 100644
--- a/mysql-test/suite/sys_vars/t/connect_timeout_grant.test
+++ b/mysql-test/suite/sys_vars/t/connect_timeout_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.connect_timeout;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET connect_timeout" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET connect_timeout" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION connect_timeout=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET connect_timeout" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL connect_timeout=10;
---error ER_GLOBAL_VARIABLE
-SET connect_timeout=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION connect_timeout=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.connect_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/t/disconnect_on_expired_password_grant.test b/mysql-test/suite/sys_vars/t/disconnect_on_expired_password_grant.test
index 0d317d298f4..e901d944e84 100644
--- a/mysql-test/suite/sys_vars/t/disconnect_on_expired_password_grant.test
+++ b/mysql-test/suite/sys_vars/t/disconnect_on_expired_password_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.disconnect_on_expired_password;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET disconnect_on_expired_password" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET disconnect_on_expired_password" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION disconnect_on_expired_password=1;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET disconnect_on_expired_password" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL disconnect_on_expired_password=1;
---error ER_GLOBAL_VARIABLE
-SET disconnect_on_expired_password=1;
---error ER_GLOBAL_VARIABLE
-SET SESSION disconnect_on_expired_password=1;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.disconnect_on_expired_password=@global;
diff --git a/mysql-test/suite/sys_vars/t/extra_max_connections_grant.test b/mysql-test/suite/sys_vars/t/extra_max_connections_grant.test
index 058364ec0a8..de35fa2cbc4 100644
--- a/mysql-test/suite/sys_vars/t/extra_max_connections_grant.test
+++ b/mysql-test/suite/sys_vars/t/extra_max_connections_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.extra_max_connections;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET extra_max_connections" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET extra_max_connections" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION extra_max_connections=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET extra_max_connections" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL extra_max_connections=10;
---error ER_GLOBAL_VARIABLE
-SET extra_max_connections=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION extra_max_connections=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.extra_max_connections=@global;
diff --git a/mysql-test/suite/sys_vars/t/gtid_binlog_state_grant.test b/mysql-test/suite/sys_vars/t/gtid_binlog_state_grant.test
index b1cfa320c0f..b08a439c7dc 100644
--- a/mysql-test/suite/sys_vars/t/gtid_binlog_state_grant.test
+++ b/mysql-test/suite/sys_vars/t/gtid_binlog_state_grant.test
@@ -1,6 +1,5 @@
--source include/not_embedded.inc
-
--echo #
--echo # MDEV-21973 Bind REPLICATION {MASTER|SLAVE} ADMIN to gtid_* GLOBAL-only system variables
--echo #
@@ -11,17 +10,15 @@
# or "Binlog closed, cannot RESET MASTER" on success.
#
-
--let var = gtid_binlog_state
--let grant = REPLICATION MASTER ADMIN
--let value = '0-1-10'
-
---echo # Test that "SET $var" is not allowed without $grant or SUPER
+--echo # Test that "SET $var" is not allowed without $grant
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
---eval REVOKE $grant, SUPER ON *.* FROM user1@localhost
+--eval REVOKE $grant ON *.* FROM user1@localhost
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -49,19 +46,3 @@ CREATE USER user1@localhost;
--disconnect user1
--connection default
DROP USER user1@localhost;
-
---echo # Test that "SET $var" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
---error ER_FLUSH_MASTER_BINLOG_CLOSED
---eval SET GLOBAL $var=$value
---error ER_GLOBAL_VARIABLE
---eval SET $var=$value
---error ER_GLOBAL_VARIABLE
---eval SET SESSION $var=$value
---disconnect user1
---connection default
-DROP USER user1@localhost;
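This test is parameterized through mysqltest variables ($var, $grant, $value), so only the REVOKE line and the echoed comment change; the removed tail was the SUPER-only block. In plain SQL the remaining check amounts to something like the sketch below, assuming a server from this tree with the binlog closed, as in the test:

    -- Without REPLICATION MASTER ADMIN: ER_SPECIFIC_ACCESS_DENIED_ERROR (1227).
    -- With the privilege but the binlog closed: the privilege check passes and
    -- the statement instead fails with 'Binlog closed, cannot RESET MASTER'.
    SET GLOBAL gtid_binlog_state = '0-1-10';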
diff --git a/mysql-test/suite/sys_vars/t/init_connect_grant.test b/mysql-test/suite/sys_vars/t/init_connect_grant.test
index 685f0900833..8f211459485 100644
--- a/mysql-test/suite/sys_vars/t/init_connect_grant.test
+++ b/mysql-test/suite/sys_vars/t/init_connect_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.init_connect;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET init_connect" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET init_connect" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION init_connect="SET @xxx=1";
--connection default
DROP USER user1@localhost;
---echo # Test that "SET init_connect" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL init_connect="SET @xxx=1";
---error ER_GLOBAL_VARIABLE
-SET init_connect="SET @xxx=1";
---error ER_GLOBAL_VARIABLE
-SET SESSION init_connect="SET @xxx=1";
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.init_connect=@global;
diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test
deleted file mode 100644
index 2094ef3dc0b..00000000000
--- a/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
-# 2011-02-09 - Added
-#
-
---source include/have_innodb.inc
-
-SET @start_global_value = @@global.innodb_change_buffer_max_size;
-SELECT @start_global_value;
-
-#
-# exists as global only
-#
---echo Valid values are between 0 and 50
-select @@global.innodb_change_buffer_max_size between 0 and 50;
-select @@global.innodb_change_buffer_max_size;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-select @@session.innodb_change_buffer_max_size;
-show global variables like 'innodb_change_buffer_max_size';
-show session variables like 'innodb_change_buffer_max_size';
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
-select * from information_schema.session_variables where variable_name='innodb_change_buffer_max_size';
---enable_warnings
-
-#
-# show that it's writable
-#
-set global innodb_change_buffer_max_size=10;
-select @@global.innodb_change_buffer_max_size;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
-select * from information_schema.session_variables where variable_name='innodb_change_buffer_max_size';
---enable_warnings
---error ER_GLOBAL_VARIABLE
-set session innodb_change_buffer_max_size=1;
-
-#
-# incorrect types
-#
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffer_max_size=1.1;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffer_max_size=1e1;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffer_max_size="foo";
-
-set global innodb_change_buffer_max_size=-7;
-select @@global.innodb_change_buffer_max_size;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
---enable_warnings
-set global innodb_change_buffer_max_size=56;
-select @@global.innodb_change_buffer_max_size;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size';
---enable_warnings
-
-#
-# min/max/DEFAULT values
-#
-set global innodb_change_buffer_max_size=0;
-select @@global.innodb_change_buffer_max_size;
-set global innodb_change_buffer_max_size=50;
-select @@global.innodb_change_buffer_max_size;
-set global innodb_change_buffer_max_size=DEFAULT;
-select @@global.innodb_change_buffer_max_size;
-
-
-SET @@global.innodb_change_buffer_max_size = @start_global_value;
-SELECT @@global.innodb_change_buffer_max_size;
diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test
deleted file mode 100644
index 19f0890feff..00000000000
--- a/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-# 2010-01-25 - Added
-#
-
---source include/have_innodb.inc
-
-SET @start_global_value = @@global.innodb_change_buffering;
-SELECT @start_global_value;
-
-#
-# exists as global only
-#
---echo Valid values are 'all', 'deletes', 'changes', 'inserts', 'none', 'purges'
-select @@global.innodb_change_buffering in ('all', 'deletes', 'changes', 'inserts', 'none', 'purges');
-select @@global.innodb_change_buffering;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-select @@session.innodb_change_buffering;
-show global variables like 'innodb_change_buffering';
-show session variables like 'innodb_change_buffering';
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffering';
-select * from information_schema.session_variables where variable_name='innodb_change_buffering';
---enable_warnings
-
-#
-# show that it's writable
-#
-set global innodb_change_buffering='none';
-select @@global.innodb_change_buffering;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffering';
-select * from information_schema.session_variables where variable_name='innodb_change_buffering';
---enable_warnings
-set @@global.innodb_change_buffering='inserts';
-select @@global.innodb_change_buffering;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffering';
-select * from information_schema.session_variables where variable_name='innodb_change_buffering';
---enable_warnings
---error ER_GLOBAL_VARIABLE
-set session innodb_change_buffering='some';
---error ER_GLOBAL_VARIABLE
-set @@session.innodb_change_buffering='some';
-
-#
-# incorrect types
-#
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffering=1.1;
-set global innodb_change_buffering=1;
-SELECT @@global.innodb_change_buffering;
---error ER_WRONG_VALUE_FOR_VAR
-set global innodb_change_buffering=-2;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffering=1e1;
---error ER_WRONG_VALUE_FOR_VAR
-set global innodb_change_buffering='some';
-
-#
-# Cleanup
-#
-
-SET @@global.innodb_change_buffering = @start_global_value;
-SELECT @@global.innodb_change_buffering;
diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test
deleted file mode 100644
index 70f8bee1523..00000000000
--- a/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test
+++ /dev/null
@@ -1,59 +0,0 @@
---source include/have_innodb.inc
---source include/have_debug.inc
-
-SET @start_global_value = @@global.innodb_change_buffering_debug;
-SELECT @start_global_value;
-
-#
-# exists as global only
-#
-select @@global.innodb_change_buffering_debug in (0, 1);
-select @@global.innodb_change_buffering_debug;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-select @@session.innodb_change_buffering_debug;
-show global variables like 'innodb_change_buffering_debug';
-show session variables like 'innodb_change_buffering_debug';
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug';
-select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug';
---enable_warnings
-
-#
-# show that it's writable
-#
-set global innodb_change_buffering_debug=1;
-select @@global.innodb_change_buffering_debug;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug';
-select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug';
---enable_warnings
-set @@global.innodb_change_buffering_debug=0;
-select @@global.innodb_change_buffering_debug;
---disable_warnings
-select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug';
-select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug';
---enable_warnings
---error ER_GLOBAL_VARIABLE
-set session innodb_change_buffering_debug='some';
---error ER_GLOBAL_VARIABLE
-set @@session.innodb_change_buffering_debug='some';
-
-#
-# incorrect types
-#
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffering_debug=1.1;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffering_debug='foo';
-set global innodb_change_buffering_debug=-2;
---error ER_WRONG_TYPE_FOR_VAR
-set global innodb_change_buffering_debug=1e1;
-set global innodb_change_buffering_debug=2;
-select @@global.innodb_change_buffering_debug;
-
-#
-# Cleanup
-#
-
-SET @@global.innodb_change_buffering_debug = @start_global_value;
-SELECT @@global.innodb_change_buffering_debug;
diff --git a/mysql-test/suite/sys_vars/t/max_connect_errors_grant.test b/mysql-test/suite/sys_vars/t/max_connect_errors_grant.test
index d349d93818d..589be6567fc 100644
--- a/mysql-test/suite/sys_vars/t/max_connect_errors_grant.test
+++ b/mysql-test/suite/sys_vars/t/max_connect_errors_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.max_connect_errors;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET max_connect_errors" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET max_connect_errors" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION max_connect_errors=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET max_connect_errors" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL max_connect_errors=10;
---error ER_GLOBAL_VARIABLE
-SET max_connect_errors=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION max_connect_errors=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.max_connect_errors=@global;
diff --git a/mysql-test/suite/sys_vars/t/max_connections_grant.test b/mysql-test/suite/sys_vars/t/max_connections_grant.test
index d51bd08ed9d..1364e526ad8 100644
--- a/mysql-test/suite/sys_vars/t/max_connections_grant.test
+++ b/mysql-test/suite/sys_vars/t/max_connections_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.max_connections;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET max_connections" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET max_connections" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION max_connections=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET max_connections" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL max_connections=10;
---error ER_GLOBAL_VARIABLE
-SET max_connections=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION max_connections=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.max_connections=@global;
diff --git a/mysql-test/suite/sys_vars/t/max_join_size_basic.test b/mysql-test/suite/sys_vars/t/max_join_size_basic.test
index bbe13457ee7..1741857d6ca 100644
--- a/mysql-test/suite/sys_vars/t/max_join_size_basic.test
+++ b/mysql-test/suite/sys_vars/t/max_join_size_basic.test
@@ -23,7 +23,7 @@ select * from information_schema.session_variables where variable_name='max_join
# show that it's writable
#
set global max_join_size=10;
-set session max_join_size=20;
+set session max_join_size=100;
select @@global.max_join_size;
select @@session.max_join_size;
show global variables like 'max_join_size';
diff --git a/mysql-test/suite/sys_vars/t/max_join_size_func.test b/mysql-test/suite/sys_vars/t/max_join_size_func.test
index c649c036565..5fc8ee5855b 100644
--- a/mysql-test/suite/sys_vars/t/max_join_size_func.test
+++ b/mysql-test/suite/sys_vars/t/max_join_size_func.test
@@ -84,7 +84,7 @@ connect (test_con1, localhost, root,,);
connection test_con1;
--echo ## Setting value of max_join_size ##
-SET @@session.max_join_size=8;
+SET @@session.max_join_size=4;
--echo ## Since total joins are more than max_join_size value so error will occur ##
--Error ER_TOO_BIG_SELECT
@@ -97,7 +97,7 @@ SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id;
##########################################################
--echo ## Setting global value of variable ##
-SET @@global.max_join_size=8;
+SET @@global.max_join_size=4;
connect (test_con2, localhost, root,,);
connection test_con2;
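The adjusted limits here and in max_join_size_basic presumably follow from the changed row estimates of the new cost model rather than from any change to max_join_size itself. A hedged sketch of that behaviour with a hypothetical table t_demo:

    CREATE TABLE t_demo (id INT PRIMARY KEY, b INT);
    INSERT INTO t_demo VALUES (1,1),(2,2),(3,3),(4,4),(5,5);
    -- max_join_size caps the estimated rows a join may examine; setting it
    -- also turns SQL_BIG_SELECTS off for the session.
    SET SESSION max_join_size = 4;
    SELECT * FROM t_demo a, t_demo b;   -- ER_TOO_BIG_SELECT (1104)
    SET SESSION sql_big_selects = 1;
    SELECT * FROM t_demo a, t_demo b;   -- allowed again
    DROP TABLE t_demo;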
diff --git a/mysql-test/suite/sys_vars/t/max_password_errors_grant.test b/mysql-test/suite/sys_vars/t/max_password_errors_grant.test
index f11c1fb8226..c8b9e998898 100644
--- a/mysql-test/suite/sys_vars/t/max_password_errors_grant.test
+++ b/mysql-test/suite/sys_vars/t/max_password_errors_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.max_password_errors;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET max_password_errors" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET max_password_errors" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION max_password_errors=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET max_password_errors" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL max_password_errors=10;
---error ER_GLOBAL_VARIABLE
-SET max_password_errors=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION max_password_errors=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.max_password_errors=@global;
diff --git a/mysql-test/suite/sys_vars/t/proxy_protocol_networks_grant.test b/mysql-test/suite/sys_vars/t/proxy_protocol_networks_grant.test
index f2dd8158424..85538755768 100644
--- a/mysql-test/suite/sys_vars/t/proxy_protocol_networks_grant.test
+++ b/mysql-test/suite/sys_vars/t/proxy_protocol_networks_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.proxy_protocol_networks;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET proxy_protocol_networks" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET proxy_protocol_networks" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION proxy_protocol_networks="";
--connection default
DROP USER user1@localhost;
---echo # Test that "SET proxy_protocol_networks" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL proxy_protocol_networks="";
---error ER_GLOBAL_VARIABLE
-SET proxy_protocol_networks="";
---error ER_GLOBAL_VARIABLE
-SET SESSION proxy_protocol_networks="";
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.proxy_protocol_networks=@global;
diff --git a/mysql-test/suite/sys_vars/t/secure_auth_grant.test b/mysql-test/suite/sys_vars/t/secure_auth_grant.test
index f597ec790cb..3cec03a2eb1 100644
--- a/mysql-test/suite/sys_vars/t/secure_auth_grant.test
+++ b/mysql-test/suite/sys_vars/t/secure_auth_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.secure_auth;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET secure_auth" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET secure_auth" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION secure_auth=1;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET secure_auth" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL secure_auth=1;
---error ER_GLOBAL_VARIABLE
-SET secure_auth=1;
---error ER_GLOBAL_VARIABLE
-SET SESSION secure_auth=1;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.secure_auth=@global;
diff --git a/mysql-test/suite/sys_vars/t/slow_launch_time_grant.test b/mysql-test/suite/sys_vars/t/slow_launch_time_grant.test
index 1a81930593e..de0ded83a40 100644
--- a/mysql-test/suite/sys_vars/t/slow_launch_time_grant.test
+++ b/mysql-test/suite/sys_vars/t/slow_launch_time_grant.test
@@ -6,11 +6,11 @@ SET @global=@@global.slow_launch_time;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET slow_launch_time" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET slow_launch_time" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -38,19 +38,4 @@ SET SESSION slow_launch_time=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET slow_launch_time" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL slow_launch_time=10;
---error ER_GLOBAL_VARIABLE
-SET slow_launch_time=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION slow_launch_time=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.slow_launch_time=@global;
diff --git a/mysql-test/suite/sys_vars/t/sql_big_selects_func.test b/mysql-test/suite/sys_vars/t/sql_big_selects_func.test
index 59d8184861d..b8ff7c53f75 100644
--- a/mysql-test/suite/sys_vars/t/sql_big_selects_func.test
+++ b/mysql-test/suite/sys_vars/t/sql_big_selects_func.test
@@ -28,7 +28,7 @@
SET @session_sql_big_selects = @@SESSION.sql_big_selects;
SET @session_max_join_size = @@SESSION.max_join_size;
SET @global_max_join_size = @@GLOBAL.max_join_size;
-SET MAX_JOIN_SIZE=9;
+SET MAX_JOIN_SIZE=21;
#
# Create tables
@@ -115,8 +115,6 @@ disconnect con_int2;
#
# Cleanup
#
-
-
SET @@SESSION.sql_big_selects = @session_sql_big_selects;
SET @@SESSION.max_join_size = @session_max_join_size;
SET @@GLOBAL.max_join_size = @global_max_join_size;
diff --git a/mysql-test/suite/sys_vars/t/sql_log_bin_grant.test b/mysql-test/suite/sys_vars/t/sql_log_bin_grant.test
index d7ffc2b613b..fa0baefac38 100644
--- a/mysql-test/suite/sys_vars/t/sql_log_bin_grant.test
+++ b/mysql-test/suite/sys_vars/t/sql_log_bin_grant.test
@@ -1,15 +1,14 @@
source include/have_log_bin.inc;
-
--echo #
--echo #
--echo #
---echo # Test that "SET sql_log_bin" is not allowed without BINLOG ADMIN or SUPER
+--echo # Test that "SET sql_log_bin" is not allowed without BINLOG ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE BINLOG ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE BINLOG ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -35,17 +34,3 @@ SET SESSION sql_log_bin=1;
--disconnect user1
--connection default
DROP USER user1@localhost;
-
---echo # Test that "SET sql_log_bin" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET sql_log_bin=1;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SET GLOBAL sql_log_bin=1;
-SET SESSION sql_log_bin=1;
---disconnect user1
---connection default
-DROP USER user1@localhost;
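Unlike the GLOBAL-only variables in the surrounding tests, sql_log_bin is session-scoped: the privilege check guards the session-level SET, and the GLOBAL form is rejected for everyone. A short sketch, assuming an account that holds BINLOG ADMIN:

    SET SESSION sql_log_bin = 0;   -- this session's statements skip the binlog
    SET SESSION sql_log_bin = 1;   -- resume binary logging for the session
    -- Rejected regardless of privileges (ER_INCORRECT_GLOBAL_LOCAL_VAR):
    --   SET GLOBAL sql_log_bin = 0;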
diff --git a/mysql-test/suite/sys_vars/t/thread_pool_idle_timeout_grant.test b/mysql-test/suite/sys_vars/t/thread_pool_idle_timeout_grant.test
index 8f5f2491e12..33b97ac6304 100644
--- a/mysql-test/suite/sys_vars/t/thread_pool_idle_timeout_grant.test
+++ b/mysql-test/suite/sys_vars/t/thread_pool_idle_timeout_grant.test
@@ -8,11 +8,11 @@ SET @global=@@global.thread_pool_idle_timeout;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET thread_pool_idle_timeout" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET thread_pool_idle_timeout" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -40,19 +40,4 @@ SET SESSION thread_pool_idle_timeout=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET thread_pool_idle_timeout" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL thread_pool_idle_timeout=10;
---error ER_GLOBAL_VARIABLE
-SET thread_pool_idle_timeout=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION thread_pool_idle_timeout=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.thread_pool_idle_timeout=@global;
diff --git a/mysql-test/suite/sys_vars/t/thread_pool_max_threads_grant.test b/mysql-test/suite/sys_vars/t/thread_pool_max_threads_grant.test
index 299d5ff4f44..b77df1493ee 100644
--- a/mysql-test/suite/sys_vars/t/thread_pool_max_threads_grant.test
+++ b/mysql-test/suite/sys_vars/t/thread_pool_max_threads_grant.test
@@ -8,11 +8,11 @@ SET @global=@@global.thread_pool_max_threads;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET thread_pool_max_threads" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET thread_pool_max_threads" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -40,19 +40,4 @@ SET SESSION thread_pool_max_threads=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET thread_pool_max_threads" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL thread_pool_max_threads=10;
---error ER_GLOBAL_VARIABLE
-SET thread_pool_max_threads=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION thread_pool_max_threads=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.thread_pool_max_threads=@global;
diff --git a/mysql-test/suite/sys_vars/t/thread_pool_oversubscribe_grant.test b/mysql-test/suite/sys_vars/t/thread_pool_oversubscribe_grant.test
index f3a96c69e8a..3ff215ae7fa 100644
--- a/mysql-test/suite/sys_vars/t/thread_pool_oversubscribe_grant.test
+++ b/mysql-test/suite/sys_vars/t/thread_pool_oversubscribe_grant.test
@@ -8,11 +8,11 @@ SET @global=@@global.thread_pool_oversubscribe;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET thread_pool_oversubscribe" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET thread_pool_oversubscribe" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -40,19 +40,4 @@ SET SESSION thread_pool_oversubscribe=10;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET thread_pool_oversubscribe" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL thread_pool_oversubscribe=10;
---error ER_GLOBAL_VARIABLE
-SET thread_pool_oversubscribe=10;
---error ER_GLOBAL_VARIABLE
-SET SESSION thread_pool_oversubscribe=10;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.thread_pool_oversubscribe=@global;
diff --git a/mysql-test/suite/sys_vars/t/thread_pool_stall_limit_grant.test b/mysql-test/suite/sys_vars/t/thread_pool_stall_limit_grant.test
index ae0ed8a785b..2c57fecd11e 100644
--- a/mysql-test/suite/sys_vars/t/thread_pool_stall_limit_grant.test
+++ b/mysql-test/suite/sys_vars/t/thread_pool_stall_limit_grant.test
@@ -8,11 +8,11 @@ SET @global=@@global.thread_pool_stall_limit;
--echo # MDEV-21961 Bind CONNECTION ADMIN to a number of global system variables
--echo #
---echo # Test that "SET thread_pool_stall_limit" is not allowed without CONNECTION ADMIN or SUPER
+--echo # Test that "SET thread_pool_stall_limit" is not allowed without CONNECTION ADMIN
CREATE USER user1@localhost;
GRANT ALL PRIVILEGES ON *.* TO user1@localhost;
-REVOKE CONNECTION ADMIN, SUPER ON *.* FROM user1@localhost;
+REVOKE CONNECTION ADMIN ON *.* FROM user1@localhost;
--connect(user1,localhost,user1,,)
--connection user1
--error ER_SPECIFIC_ACCESS_DENIED_ERROR
@@ -40,19 +40,4 @@ SET SESSION thread_pool_stall_limit=400;
--connection default
DROP USER user1@localhost;
---echo # Test that "SET thread_pool_stall_limit" is allowed with SUPER
-
-CREATE USER user1@localhost;
-GRANT SUPER ON *.* TO user1@localhost;
---connect(user1,localhost,user1,,)
---connection user1
-SET GLOBAL thread_pool_stall_limit=400;
---error ER_GLOBAL_VARIABLE
-SET thread_pool_stall_limit=400;
---error ER_GLOBAL_VARIABLE
-SET SESSION thread_pool_stall_limit=400;
---disconnect user1
---connection default
-DROP USER user1@localhost;
-
SET @@global.thread_pool_stall_limit=@global;
diff --git a/mysql-test/suite/sysschema/r/all_sys_objects_exist.result b/mysql-test/suite/sysschema/r/all_sys_objects_exist.result
index 6dddd8a186c..4c1bf311450 100644
--- a/mysql-test/suite/sysschema/r/all_sys_objects_exist.result
+++ b/mysql-test/suite/sysschema/r/all_sys_objects_exist.result
@@ -129,6 +129,9 @@ version_patch FUNCTION
create_synonym_db PROCEDURE
diagnostics PROCEDURE
execute_prepared_stmt PROCEDURE
+optimizer_switch_choice PROCEDURE
+optimizer_switch_off PROCEDURE
+optimizer_switch_on PROCEDURE
ps_setup_disable_background_threads PROCEDURE
ps_setup_disable_consumer PROCEDURE
ps_setup_disable_instrument PROCEDURE
diff --git a/mysql-test/suite/sysschema/r/optimizer_switch.result b/mysql-test/suite/sysschema/r/optimizer_switch.result
new file mode 100644
index 00000000000..017276fc4b8
--- /dev/null
+++ b/mysql-test/suite/sysschema/r/optimizer_switch.result
@@ -0,0 +1,40 @@
+call sys.optimizer_switch_on();
+option opt
+condition_pushdown_for_derived on
+condition_pushdown_for_subquery on
+condition_pushdown_from_having on
+derived_merge on
+derived_with_keys on
+exists_to_in on
+extended_keys on
+firstmatch on
+index_condition_pushdown on
+index_merge on
+index_merge_intersection on
+index_merge_sort_union on
+index_merge_union on
+in_to_exists on
+join_cache_bka on
+join_cache_hashed on
+join_cache_incremental on
+loosescan on
+materialization on
+optimize_join_buffer_size on
+orderby_uses_equalities on
+outer_join_with_cache on
+partial_match_rowid_merge on
+partial_match_table_scan on
+rowid_filter on
+semijoin on
+semijoin_with_cache on
+split_materialized on
+subquery_cache on
+table_elimination on
+call sys.optimizer_switch_off();
+option opt
+engine_condition_pushdown off
+index_merge_sort_intersection off
+mrr off
+mrr_cost_based off
+mrr_sort_keys off
+not_null_range_scan off
diff --git a/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result b/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result
index 2199c959cd0..8893726fe12 100644
--- a/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result
+++ b/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result
@@ -32,6 +32,6 @@ KEY (i, j, k)
);
SELECT * FROM sys.schema_redundant_indexes;
table_schema table_name redundant_index_name redundant_index_columns redundant_index_non_unique dominant_index_name dominant_index_columns dominant_index_non_unique subpart_exists sql_drop_index
-rkey rkey j j 1 j_2 j,k 1 0 ALTER TABLE `rkey`.`rkey` DROP INDEX `j`
rkey rkey i i,j,k 1 PRIMARY i 0 0 ALTER TABLE `rkey`.`rkey` DROP INDEX `i`
+rkey rkey j j 1 j_2 j,k 1 0 ALTER TABLE `rkey`.`rkey` DROP INDEX `j`
DROP DATABASE rkey;
diff --git a/mysql-test/suite/sysschema/t/mysqldump.test b/mysql-test/suite/sysschema/t/mysqldump.test
index 35abc704117..22f358c1f55 100644
--- a/mysql-test/suite/sysschema/t/mysqldump.test
+++ b/mysql-test/suite/sysschema/t/mysqldump.test
@@ -32,4 +32,4 @@ DROP TEMPORARY TABLE tmp_tables_priv;
FLUSH PRIVILEGES;
--let $MYSQLD_DATADIR= `select @@datadir`
---remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+--remove_file $MYSQLD_DATADIR/mariadb_upgrade_info
diff --git a/mysql-test/suite/sysschema/t/optimizer_switch.test b/mysql-test/suite/sysschema/t/optimizer_switch.test
new file mode 100644
index 00000000000..b4d527e5519
--- /dev/null
+++ b/mysql-test/suite/sysschema/t/optimizer_switch.test
@@ -0,0 +1,2 @@
+call sys.optimizer_switch_on();
+call sys.optimizer_switch_off();
diff --git a/mysql-test/suite/sysschema/t/pr_statement_performance_analyzer.test b/mysql-test/suite/sysschema/t/pr_statement_performance_analyzer.test
index b8932381e52..96b59a55e6d 100644
--- a/mysql-test/suite/sysschema/t/pr_statement_performance_analyzer.test
+++ b/mysql-test/suite/sysschema/t/pr_statement_performance_analyzer.test
@@ -155,9 +155,10 @@ CALL sys.statement_performance_analyzer('delta', 'test.tmp_digests_ini', 'analys
CALL sys.statement_performance_analyzer('overall', NULL, 'with_errors_or_warnings');
CALL sys.statement_performance_analyzer('delta', 'test.tmp_digests_ini', 'with_errors_or_warnings');
---replace_result $query_select QUERY_SELECT $digest_select DIGEST_SELECT $o_sel_total_latency LATENCY $o_sel_first_seen FIRST_SEEN $o_sel_last_seen LAST_SEEN
+--replace_column 4 LATENCY
+--replace_result $query_select QUERY_SELECT $digest_select DIGEST_SELECT $o_sel_first_seen FIRST_SEEN $o_sel_last_seen LAST_SEEN $digest_update DIGEST_UPDATE $query_update QUERY_UPDATE $o_upd_first_seen FIRST_SEEN $o_upd_last_seen LAST_SEEN
CALL sys.statement_performance_analyzer('overall', NULL, 'with_full_table_scans');
---replace_result $query_select QUERY_SELECT $digest_select DIGEST_SELECT $d_sel_total_latency LATENCY $o_sel_first_seen FIRST_SEEN $o_sel_last_seen LAST_SEEN
+--replace_result $query_select QUERY_SELECT $digest_select DIGEST_SELECT $d_sel_total_latency LATENCY $o_sel_first_seen FIRST_SEEN $o_sel_last_seen LAST_SEEN
CALL sys.statement_performance_analyzer('delta', 'test.tmp_digests_ini', 'with_full_table_scans');
--replace_result $query_select QUERY_SELECT $digest_select DIGEST_SELECT $o_sel_total_latency LATENCY $o_sel_first_seen FIRST_SEEN $o_sel_last_seen LAST_SEEN
diff --git a/mysql-test/suite/sysschema/t/v_schema_redundant_indexes.test b/mysql-test/suite/sysschema/t/v_schema_redundant_indexes.test
index 0cd2ac91fa4..ed40a294238 100644
--- a/mysql-test/suite/sysschema/t/v_schema_redundant_indexes.test
+++ b/mysql-test/suite/sysschema/t/v_schema_redundant_indexes.test
@@ -34,6 +34,7 @@ CREATE TABLE rkey.rkey (
KEY (i, j, k)
);
+--sorted_result
SELECT * FROM sys.schema_redundant_indexes;
DROP DATABASE rkey;
diff --git a/mysql-test/suite/vcol/inc/vcol_ins_upd.inc b/mysql-test/suite/vcol/inc/vcol_ins_upd.inc
index 8cf0fb9ed6c..c523083d370 100644
--- a/mysql-test/suite/vcol/inc/vcol_ins_upd.inc
+++ b/mysql-test/suite/vcol/inc/vcol_ins_upd.inc
@@ -29,6 +29,8 @@ let $create4 = create table t1 (a int,
d varchar(16));
eval $create1;
set sql_warnings = 1;
+# Prefer table scans to range
+set @@optimizer_scan_setup_cost=0;
--echo #
--echo # *** INSERT ***
@@ -197,6 +199,7 @@ select * from t1;
--echo # UPDATE tbl_name SET non-vcol=expr
--echo # WHERE vcol=between const1 and const2 ORDER BY vcol LIMIT 2
insert into t1 (a) values (1), (2), (3), (4), (5);
+
select * from t1;
update t1 set a=6 where c between -1 and 0
order by c limit 2;
diff --git a/mysql-test/suite/vcol/r/vcol_ins_upd_innodb.result b/mysql-test/suite/vcol/r/vcol_ins_upd_innodb.result
index 6807f89fdbe..624477d20f0 100644
--- a/mysql-test/suite/vcol/r/vcol_ins_upd_innodb.result
+++ b/mysql-test/suite/vcol/r/vcol_ins_upd_innodb.result
@@ -3,6 +3,7 @@ create table t1 (a int,
b int as (-a),
c int as (-a) persistent);
set sql_warnings = 1;
+set @@optimizer_scan_setup_cost=0;
#
# *** INSERT ***
#
diff --git a/mysql-test/suite/vcol/r/vcol_ins_upd_myisam.result b/mysql-test/suite/vcol/r/vcol_ins_upd_myisam.result
index 43206dba31b..823b4f520e6 100644
--- a/mysql-test/suite/vcol/r/vcol_ins_upd_myisam.result
+++ b/mysql-test/suite/vcol/r/vcol_ins_upd_myisam.result
@@ -3,6 +3,7 @@ create table t1 (a int,
b int as (-a),
c int as (-a) persistent);
set sql_warnings = 1;
+set @@optimizer_scan_setup_cost=0;
#
# *** INSERT ***
#
diff --git a/mysql-test/suite/vcol/r/vcol_select_innodb.result b/mysql-test/suite/vcol/r/vcol_select_innodb.result
index 40308b6e072..57a17cbe468 100644
--- a/mysql-test/suite/vcol/r/vcol_select_innodb.result
+++ b/mysql-test/suite/vcol/r/vcol_select_innodb.result
@@ -135,7 +135,7 @@ count(distinct c)
3
explain select count(distinct c) from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL c 5 NULL 6 Using index for group-by
+1 SIMPLE t1 range NULL c 5 NULL 5 Using index for group-by
###
### filesort & range-based utils
###
diff --git a/mysql-test/suite/vcol/r/vcol_select_myisam.result b/mysql-test/suite/vcol/r/vcol_select_myisam.result
index 05f86347706..8964eda1ba8 100644
--- a/mysql-test/suite/vcol/r/vcol_select_myisam.result
+++ b/mysql-test/suite/vcol/r/vcol_select_myisam.result
@@ -74,7 +74,7 @@ a b c
explain select * from t1 where c in (select c from t3 where c between -2 and -1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 range c c 5 NULL 2 Using where; Using index
-1 PRIMARY t1 ref c c 5 test.t3.c 2
+1 PRIMARY t1 ref c c 5 test.t3.c 1
# select_type=UNION, type=system
# select_type=UNION RESULT, type=<union1,2>
select * from t1 union select * from t2;
@@ -133,7 +133,7 @@ count(distinct c)
3
explain select count(distinct c) from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL c 5 NULL 6 Using index for group-by
+1 SIMPLE t1 range NULL c 5 NULL 5 Using index for group-by
###
### filesort & range-based utils
###
diff --git a/mysql-test/suite/versioning/r/alter.result b/mysql-test/suite/versioning/r/alter.result
index 9751f7c718d..7f7323ba47f 100644
--- a/mysql-test/suite/versioning/r/alter.result
+++ b/mysql-test/suite/versioning/r/alter.result
@@ -199,8 +199,6 @@ a
2
1
select row_start from t where a=3 into @tm;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
alter table t add column b int;
select @tm=row_start from t where a=3;
@tm=row_start
diff --git a/mysql-test/suite/versioning/r/commit_id.result b/mysql-test/suite/versioning/r/commit_id.result
index 8815613292e..abf2eaf91ba 100644
--- a/mysql-test/suite/versioning/r/commit_id.result
+++ b/mysql-test/suite/versioning/r/commit_id.result
@@ -10,8 +10,6 @@ insert into t1 values ();
set @ts0= now(6);
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx0;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select transaction_id = @tx0 from mysql.transaction_registry
order by transaction_id desc limit 1;
transaction_id = @tx0
@@ -19,8 +17,6 @@ transaction_id = @tx0
set @ts1= now(6);
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx1;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select transaction_id = @tx1 from mysql.transaction_registry
order by transaction_id desc limit 1;
transaction_id = @tx1
@@ -28,8 +24,6 @@ transaction_id = @tx1
set @ts2= now(6);
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx2;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select transaction_id = @tx2 from mysql.transaction_registry
order by transaction_id desc limit 1;
transaction_id = @tx2
@@ -72,32 +66,24 @@ trt_trx_sees(0, @tx2)
set transaction isolation level read uncommitted;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx3;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'READ-UNCOMMITTED' from mysql.transaction_registry where transaction_id = @tx3;
isolation_level = 'READ-UNCOMMITTED'
1
set transaction isolation level read committed;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx4;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'READ-COMMITTED' from mysql.transaction_registry where transaction_id = @tx4;
isolation_level = 'READ-COMMITTED'
1
set transaction isolation level serializable;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx5;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'SERIALIZABLE' from mysql.transaction_registry where transaction_id = @tx5;
isolation_level = 'SERIALIZABLE'
1
set transaction isolation level repeatable read;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx6;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'REPEATABLE-READ' from mysql.transaction_registry where transaction_id = @tx6;
isolation_level = 'REPEATABLE-READ'
1
diff --git a/mysql-test/suite/versioning/r/create.result b/mysql-test/suite/versioning/r/create.result
index c9d68f15e57..158f0b7faf3 100644
--- a/mysql-test/suite/versioning/r/create.result
+++ b/mysql-test/suite/versioning/r/create.result
@@ -271,12 +271,8 @@ t3 CREATE TABLE `t3` (
## For versioned table
insert into t1 values (1);
select row_start from t1 into @row_start;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into t0 (y) values (2);
select st from t0 into @st;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create or replace table t2 with system versioning as select * from t1;
show create table t2;
Table Create Table
@@ -339,12 +335,8 @@ ERROR 42S21: Duplicate column name 'row_end'
# Prepare checking for historical row
delete from t1;
select row_end from t1 for system_time all into @row_end;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
delete from t0;
select en from t0 for system_time all into @en;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
## Combinations of versioned + non-versioned
create or replace table t2 (y int);
insert into t2 values (3);
@@ -365,14 +357,10 @@ insert into t2 (y) values (1), (2);
delete from t2 where y = 2;
create or replace table t3 select * from t2 for system_time all;
select st, en from t3 where y = 1 into @st, @en;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select y from t2 for system_time all where st = @st and en = @en;
y
1
select st, en from t3 where y = 2 into @st, @en;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select y from t2 for system_time all where st = @st and en = @en;
y
2
diff --git a/mysql-test/suite/versioning/r/cte.result b/mysql-test/suite/versioning/r/cte.result
index 6ca9c238d45..11d478ac456 100644
--- a/mysql-test/suite/versioning/r/cte.result
+++ b/mysql-test/suite/versioning/r/cte.result
@@ -61,7 +61,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 4 100.00
2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where
3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where
-3 RECURSIVE UNION <derived2> ref key0 key0 5 test.e.mgr 2 100.00
+3 RECURSIVE UNION <derived2> ref key0 key0 5 test.e.mgr 1 100.00
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 with recursive ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME AS OF TIMESTAMP @`ts_1` `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME AS OF TIMESTAMP @`ts_1` `e` join `ancestors` `a` where `a`.`emp_id` = `test`.`e`.`mgr` and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `ancestors`.`emp_id` AS `emp_id`,`ancestors`.`name` AS `name`,`ancestors`.`mgr` AS `mgr`,`ancestors`.`salary` AS `salary` from `ancestors`
@@ -102,7 +102,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 4 100.00
2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where
3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where
-3 RECURSIVE UNION <derived2> ref key0 key0 5 test.e.mgr 2 100.00
+3 RECURSIVE UNION <derived2> ref key0 key0 5 test.e.mgr 1 100.00
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 with recursive ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME AS OF TIMESTAMP @`ts_1` `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME AS OF TIMESTAMP @`ts_1` `e` join `ancestors` `a` where `a`.`emp_id` = `test`.`e`.`mgr` and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `ancestors`.`emp_id` AS `emp_id`,`ancestors`.`name` AS `name`,`ancestors`.`mgr` AS `mgr`,`ancestors`.`salary` AS `salary` from `ancestors`
@@ -140,10 +140,10 @@ where e.mgr = a.emp_id
select name from emp where emp_id in (select emp_id from ancestors for system_time as of timestamp @ts_1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY emp ALL PRIMARY NULL NULL NULL 4 100.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.emp.emp_id 2 100.00 FirstMatch(emp)
+1 PRIMARY <derived2> ref key0 key0 5 test.emp.emp_id 1 100.00 FirstMatch(emp)
2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where
3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where
-3 RECURSIVE UNION <derived2> ref key0 key0 5 test.e.mgr 2 100.00
+3 RECURSIVE UNION <derived2> ref key0 key0 5 test.e.mgr 1 100.00
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 with recursive ancestors as (/* select#2 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME AS OF TIMESTAMP @`ts_1` `e` where `test`.`e`.`name` = 'bill' and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1` union /* select#3 */ select `test`.`e`.`emp_id` AS `emp_id`,`test`.`e`.`name` AS `name`,`test`.`e`.`mgr` AS `mgr`,`test`.`e`.`salary` AS `salary` from `test`.`emp` FOR SYSTEM_TIME AS OF TIMESTAMP @`ts_1` `e` join `ancestors` `a` where `a`.`emp_id` = `test`.`e`.`mgr` and `test`.`e`.`row_end` > @`ts_1` and `test`.`e`.`row_start` <= @`ts_1`)/* select#1 */ select `test`.`emp`.`name` AS `name` from `test`.`emp` semi join (`ancestors`) where `ancestors`.`emp_id` = `test`.`emp`.`emp_id` and `test`.`emp`.`row_end` = TIMESTAMP'2038-01-19 03:14:07.999999'
diff --git a/mysql-test/suite/versioning/r/foreign.result b/mysql-test/suite/versioning/r/foreign.result
index b17deba1c1e..7c8d9577096 100644
--- a/mysql-test/suite/versioning/r/foreign.result
+++ b/mysql-test/suite/versioning/r/foreign.result
@@ -274,8 +274,6 @@ on update cascade
) engine=innodb;
insert into parent (value) values (23);
select id, value from parent into @id, @value;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into child values (default, @id, @value);
insert into subchild values (default, @id, @value);
select parent_id from subchild;
diff --git a/mysql-test/suite/versioning/r/insert.result b/mysql-test/suite/versioning/r/insert.result
index 442d71c6a91..fab71b9f1cc 100644
--- a/mysql-test/suite/versioning/r/insert.result
+++ b/mysql-test/suite/versioning/r/insert.result
@@ -54,8 +54,6 @@ drop view vt1_1;
create or replace table t1( id bigint primary key, a int, b int) with system versioning;
insert into t1 values(1, 1, 1);
select row_start, row_end from t1 into @sys_start, @sys_end;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select id, a, b from t1;
id a b
1 1 1
diff --git a/mysql-test/suite/versioning/r/load_data.result b/mysql-test/suite/versioning/r/load_data.result
index 1fcde73e565..5e7b36c9a6a 100644
--- a/mysql-test/suite/versioning/r/load_data.result
+++ b/mysql-test/suite/versioning/r/load_data.result
@@ -1,8 +1,6 @@
CREATE TABLE t1 (a INT, b INT, c INT, vc INT AS (c), UNIQUE(a), UNIQUE(b)) WITH SYSTEM VERSIONING;
INSERT IGNORE INTO t1 (a,b,c) VALUES (1,2,3);
SELECT a, b, c FROM t1 INTO OUTFILE '15330.data';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
LOAD DATA INFILE '15330.data' IGNORE INTO TABLE t1 (a,b,c);
Warnings:
Warning 1062 Duplicate entry '1' for key 'a'
diff --git a/mysql-test/suite/versioning/r/partition.result b/mysql-test/suite/versioning/r/partition.result
index febb19bf48e..3656db2fe06 100644
--- a/mysql-test/suite/versioning/r/partition.result
+++ b/mysql-test/suite/versioning/r/partition.result
@@ -178,8 +178,6 @@ x C D
1 1 1
set @str= concat('select row_start from t1 partition (pn) into @ts0');
prepare stmt from @str;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @now= now(6);
@@ -191,8 +189,6 @@ execute select_pn;
x C D
set @str= concat('select row_start from t1 partition (p0) into @ts1');
prepare stmt from @str;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
select @ts0 = @ts1;
@@ -208,8 +204,6 @@ x C D
2 1 1
set @str= concat('select row_start from t1 partition (pn) into @ts0');
prepare stmt from @str;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @now= now(6);
@@ -225,20 +219,14 @@ drop prepare select_p0;
drop prepare select_pn;
set @str= concat('select row_start from t1 partition (p0) where x = 2 into @ts1');
prepare stmt from @str;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @str= concat('select row_end from t1 partition (p0) where x = 2 into @ts2');
prepare stmt from @str;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @str= concat('select row_start from t1 partition (pn) into @ts3');
prepare stmt from @str;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
select @ts0 = @ts1;
@@ -821,8 +809,6 @@ create or replace table t2 (f int);
create or replace trigger tr before insert on t2
for each row select table_rows from information_schema.tables
where table_name = 't1' into @a;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into t2 values (1);
#
# MDEV-14740 Locking assertion for system_time partitioning
@@ -832,8 +818,6 @@ partition by system_time interval 1 week;
create or replace table t2 (f int);
create or replace trigger tr before insert on t2
for each row select count(*) from t1 into @a;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into t2 values (1);
#
# MDEV-14747 ALTER PARTITION BY SYSTEM_TIME after LOCK TABLES
diff --git a/mysql-test/suite/versioning/r/select,trx_id.rdiff b/mysql-test/suite/versioning/r/select,trx_id.rdiff
deleted file mode 100644
index 8906007a348..00000000000
--- a/mysql-test/suite/versioning/r/select,trx_id.rdiff
+++ /dev/null
@@ -1,11 +0,0 @@
---- select.result 2018-06-29 18:09:17.962447067 +0200
-+++ select.reject 2018-06-29 18:10:04.618808616 +0200
-@@ -17,6 +17,8 @@
- (8, 108),
- (9, 109);
- set @t0= now(6);
-+Warnings:
-+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
- delete from t1 where x = 3;
- delete from t1 where x > 7;
- insert into t1(x, y) values(3, 33);
diff --git a/mysql-test/suite/versioning/r/select.result b/mysql-test/suite/versioning/r/select.result
index 90c99d1bf0e..9841b4dedbb 100644
--- a/mysql-test/suite/versioning/r/select.result
+++ b/mysql-test/suite/versioning/r/select.result
@@ -23,8 +23,6 @@ delete from t1 where x = 3;
delete from t1 where x > 7;
insert into t1(x, y) values(3, 33);
select sys_trx_start from t1 where x = 3 and y = 33 into @t1;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select x, y from t1;
x y
0 100
@@ -382,8 +380,6 @@ insert into t1 values (1);
set @ts= now(6);
delete from t1;
select sys_trx_start from t1 for system_time all into @trx_start;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
## ensure @trx_start is much lower than unix timestamp
select @trx_start < unix_timestamp(@ts) - 100 as trx_start_good;
trx_start_good
@@ -577,11 +573,7 @@ period for system_time (row_start, row_end)
insert into t1 values (1);
delete from t1;
select row_start from t1 for system_time all into @t1;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select row_end from t1 for system_time all into @t2;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select * from t1 for system_time between @t1 and @t2;
a
1
diff --git a/mysql-test/suite/versioning/r/select2,trx_id.rdiff b/mysql-test/suite/versioning/r/select2,trx_id.rdiff
index bdc20d1dc4f..45555385c31 100644
--- a/mysql-test/suite/versioning/r/select2,trx_id.rdiff
+++ b/mysql-test/suite/versioning/r/select2,trx_id.rdiff
@@ -1,15 +1,15 @@
---- select2.result 2018-06-29 17:51:17.142172085 +0200
-+++ select2,trx_id.reject 2018-06-29 18:03:49.034273090 +0200
-@@ -26,6 +26,8 @@
+--- suite/versioning/r/select2.result 2022-12-12 19:34:34.242342915 +0200
++++ suite/versioning/r/select2,trx_id.reject 2022-12-12 19:37:18.721907294 +0200
+@@ -22,6 +22,8 @@
+ delete from t1 where x > 7;
+ insert into t1(x, y) values(3, 33);
select sys_start from t1 where x = 3 and y = 33 into @t1;
- Warnings:
- Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+set @x1= @t1;
+select trt_commit_ts(@x1) into @t1;
select x, y from t1;
x y
0 100
-@@ -86,7 +88,7 @@
+@@ -82,7 +84,7 @@
8 108
9 109
3 33
@@ -18,7 +18,7 @@
ASOF2_x y
0 100
1 101
-@@ -98,7 +100,7 @@
+@@ -94,7 +96,7 @@
7 107
8 108
9 109
@@ -27,7 +27,7 @@
FROMTO2_x y
0 100
1 101
-@@ -110,7 +112,7 @@
+@@ -106,7 +108,7 @@
7 107
8 108
9 109
diff --git a/mysql-test/suite/versioning/r/select2.result b/mysql-test/suite/versioning/r/select2.result
index 353bdf8e696..613fe86e668 100644
--- a/mysql-test/suite/versioning/r/select2.result
+++ b/mysql-test/suite/versioning/r/select2.result
@@ -18,14 +18,10 @@ insert into t1 (x, y) values
(9, 109);
set @t0= now(6);
select sys_start from t1 limit 1 into @x0;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
delete from t1 where x = 3;
delete from t1 where x > 7;
insert into t1(x, y) values(3, 33);
select sys_start from t1 where x = 3 and y = 33 into @t1;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select x, y from t1;
x y
0 100
diff --git a/mysql-test/suite/versioning/r/trx_id.result b/mysql-test/suite/versioning/r/trx_id.result
index 9beec414fbb..d9abbe9cc54 100644
--- a/mysql-test/suite/versioning/r/trx_id.result
+++ b/mysql-test/suite/versioning/r/trx_id.result
@@ -25,15 +25,11 @@ add period for system_time(s, e),
add system versioning,
algorithm=inplace;
select s from t1 into @trx_start;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select count(*) = 1 from mysql.transaction_registry where transaction_id = @trx_start;
count(*) = 1
1
create or replace table t1 (x int);
select count(*) from mysql.transaction_registry into @tmp;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
alter table t1
add column s bigint unsigned as row start,
add column e bigint unsigned as row end,
@@ -52,15 +48,11 @@ add period for system_time(s, e),
add system versioning,
algorithm=copy;
select s from t1 into @trx_start;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select count(*) = 1 from mysql.transaction_registry where transaction_id = @trx_start;
count(*) = 1
1
create or replace table t1 (x int);
select count(*) from mysql.transaction_registry into @tmp;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
alter table t1
add column s bigint unsigned as row start,
add column e bigint unsigned as row end,
@@ -113,14 +105,8 @@ set @ts2= sysdate(6);
commit;
set @ts3= sysdate(6);
select sys_start from t1 where x = 1 into @trx_id1;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select sys_start from t1 where x = 2 into @trx_id2;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select sys_start from t1 where x = 3 into @trx_id3;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @trx_id1 < @trx_id2, @trx_id2 < @trx_id3;
@trx_id1 < @trx_id2 @trx_id2 < @trx_id3
1 1
@@ -278,8 +264,6 @@ set @ts1= now(6);
insert into t1 values (1);
commit;
select row_start from t1 into @trx_id;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select trt_begin_ts(@trx_id) <= @ts1 as BEGIN_TS_GOOD;
BEGIN_TS_GOOD
1
diff --git a/mysql-test/suite/versioning/r/update,trx_id.rdiff b/mysql-test/suite/versioning/r/update,trx_id.rdiff
index 7ce75714235..5f0e77bf54e 100644
--- a/mysql-test/suite/versioning/r/update,trx_id.rdiff
+++ b/mysql-test/suite/versioning/r/update,trx_id.rdiff
@@ -1,5 +1,5 @@
---- update.result 2018-12-19 13:55:35.873917389 +0300
-+++ update,trx_id.reject 2018-12-19 13:55:35.533917399 +0300
+--- update.result
++++ update,trx_id.reject
@@ -81,12 +81,10 @@
commit;
select x, y, sys_trx_end = MAXVAL as current from t1 for system_time all order by sys_trx_end, x, y;
diff --git a/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff b/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff
index 596abf9c681..5fc091c7752 100644
--- a/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff
+++ b/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff
@@ -1,5 +1,5 @@
---- r/wsrep-recover-v25.result 2019-02-28 09:20:56.153775856 +0200
-+++ r/wsrep-recover-v25.reject 2019-02-28 09:22:16.578113115 +0200
+--- r/wsrep-recover-v25.result
++++ r/wsrep-recover-v25.reject
@@ -12,4 +12,16 @@
SELECT VARIABLE_VALUE `expect 6` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
expect 6
diff --git a/mysql-test/suite/wsrep/r/wsrep_provider_plugin.result b/mysql-test/suite/wsrep/r/wsrep_provider_plugin.result
new file mode 100644
index 00000000000..abcc15b5c41
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_provider_plugin.result
@@ -0,0 +1,24 @@
+# Correct Galera library found
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+SET GLOBAL wsrep_provider_repl_max_ws_size=1;
+SHOW VARIABLES LIKE 'wsrep_provider_repl_max_ws_size';
+Variable_name Value
+wsrep_provider_repl_max_ws_size 1
+INSERT INTO t1 VALUES (1);
+ERROR HY000: Maximum writeset size exceeded
+SET GLOBAL wsrep_provider_repl_max_ws_size=DEFAULT;
+SHOW VARIABLES LIKE 'wsrep_provider_repl_max_ws_size';
+Variable_name Value
+wsrep_provider_repl_max_ws_size 2147483647
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_provider_options='repl.max_ws_size=1';
+ERROR HY000: Variable 'wsrep_provider_options' is a read only variable
+INSERT INTO t1 VALUES (2);
+SET GLOBAL wsrep_provider='none';
+ERROR HY000: Variable 'wsrep_provider' is a read only variable
+DROP TABLE t1;
+CALL mtr.add_suppression("transaction size limit");
+CALL mtr.add_suppression("rbr write fail");
+SELECT VARIABLE_NAME,READ_ONLY FROM information_schema.system_variables where VARIABLE_NAME like '%wsrep_provider_options%';
+VARIABLE_NAME READ_ONLY
+WSREP_PROVIDER_OPTIONS YES
diff --git a/mysql-test/suite/wsrep/r/wsrep_provider_plugin_basic.result b/mysql-test/suite/wsrep/r/wsrep_provider_plugin_basic.result
new file mode 100644
index 00000000000..23c4d45f40c
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_provider_plugin_basic.result
@@ -0,0 +1,66 @@
+# Correct Galera library found
+select variable_type, global_value from information_schema.system_variables where variable_name = 'wsrep_provider_socket_recv_buf_size';
+variable_type global_value
+VARCHAR auto
+set global wsrep_provider_socket_recv_buf_size = 'foo';
+ERROR 42000: Variable 'socket_recv_buf_size' can't be set to the value of 'foo'
+set global wsrep_provider_socket_recv_buf_size = '1M';
+show global variables like 'wsrep_provider_socket_recv_buf_size';
+Variable_name Value
+wsrep_provider_socket_recv_buf_size 1M
+set global wsrep_provider_socket_recv_buf_size = default;
+show global variables like 'wsrep_provider_socket_recv_buf_size';
+Variable_name Value
+wsrep_provider_socket_recv_buf_size auto
+select variable_type, global_value from information_schema.system_variables where variable_name = 'wsrep_provider_evs_send_window';
+variable_type global_value
+BIGINT 4
+set global wsrep_provider_evs_send_window = -10;
+ERROR 42000: Variable 'evs_send_window' can't be set to the value of '-10'
+set global wsrep_provider_evs_send_window = 10;
+show global variables like 'wsrep_provider_evs_send_window';
+Variable_name Value
+wsrep_provider_evs_send_window 10
+set global wsrep_provider_evs_send_window = default;
+show global variables like 'wsrep_provider_evs_send_window';
+Variable_name Value
+wsrep_provider_evs_send_window 4
+select variable_type from information_schema.system_variables where variable_name = 'wsrep_provider_gcs_max_throttle';
+variable_type
+DOUBLE
+set global wsrep_provider_gcs_max_throttle = 1.1;
+ERROR 42000: Variable 'gcs_max_throttle' can't be set to the value of '1.100000'
+set global wsrep_provider_gcs_max_throttle = 0.5;
+show global variables like 'wsrep_provider_gcs_max_throttle';
+Variable_name Value
+wsrep_provider_gcs_max_throttle 0.500000
+set global wsrep_provider_gcs_max_throttle = default;
+show global variables like 'wsrep_provider_gcs_max_throttle';
+Variable_name Value
+wsrep_provider_gcs_max_throttle 0.250000
+select variable_type from information_schema.system_variables where variable_name = 'wsrep_provider_cert_log_conflicts';
+variable_type
+BOOLEAN
+set global wsrep_provider_cert_log_conflicts = on;
+show global variables like 'wsrep_provider_cert_log_conflicts';
+Variable_name Value
+wsrep_provider_cert_log_conflicts ON
+set global wsrep_provider_cert_log_conflicts = off;
+show global variables like 'wsrep_provider_cert_log_conflicts';
+Variable_name Value
+wsrep_provider_cert_log_conflicts OFF
+set global wsrep_provider_cert_log_conflicts = default;
+show global variables like 'wsrep_provider_cert_log_conflicts';
+Variable_name Value
+wsrep_provider_cert_log_conflicts OFF
+select read_only from information_schema.system_variables where variable_name = 'wsrep_provider_evs_auto_evict';
+read_only
+YES
+set global wsrep_provider_evs_auto_evict = on;
+ERROR HY000: Variable 'wsrep_provider_evs_auto_evict' is a read only variable
+set global wsrep_provider_gcs_fc_master_slave = default;
+Warnings:
+Warning 1287 '@@wsrep_provider_gcs_fc_master_slave' is deprecated and will be removed in a future release
+call mtr.add_suppression("error setting param");
+call mtr.add_suppression("Unknown parameter");
+call mtr.add_suppression("Setting parameter");
diff --git a/mysql-test/suite/wsrep/r/wsrep_provider_plugin_defaults.result b/mysql-test/suite/wsrep/r/wsrep_provider_plugin_defaults.result
new file mode 100644
index 00000000000..ce164f66a22
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_provider_plugin_defaults.result
@@ -0,0 +1,1270 @@
+# Correct Galera library found
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_provider%' AND VARIABLE_NAME NOT IN (
+'wsrep_provider',
+'wsrep_provider_options',
+'wsrep_provider_base_dir',
+'wsrep_provider_base_port',
+'wsrep_provider_gcache_dir',
+'wsrep_provider_dbug',
+'wsrep_provider_gcache_debug',
+'wsrep_provider_signal',
+'wsrep_provider_gmcast_listen_addr');
+COUNT(*)
+83
+SELECT * FROM INFORMATION_SCHEMA.SYSTEM_VARIABLES
+WHERE VARIABLE_NAME LIKE 'wsrep_provider_%' AND VARIABLE_NAME NOT IN (
+'wsrep_provider',
+'wsrep_provider_options',
+'wsrep_provider_base_dir',
+'wsrep_provider_base_port',
+'wsrep_provider_gcache_dir',
+'wsrep_provider_dbug',
+'wsrep_provider_gcache_debug',
+'wsrep_provider_signal',
+'wsrep_provider_gmcast_listen_addr')
+ORDER BY VARIABLE_NAME;
+VARIABLE_NAME WSREP_PROVIDER_BASE_HOST
+SESSION_VALUE NULL
+GLOBAL_VALUE 127.0.0.1
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 127.0.0.1
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_CERT_LOG_CONFLICTS
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_CERT_OPTIMISTIC_PA
+SESSION_VALUE NULL
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_DEBUG
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_AUTO_EVICT
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_CAUSAL_KEEPALIVE_PERIOD
+SESSION_VALUE NULL
+GLOBAL_VALUE 1.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_DEBUG_LOG_MASK
+SESSION_VALUE NULL
+GLOBAL_VALUE 0x1
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0x1
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_DELAYED_KEEP_PERIOD
+SESSION_VALUE NULL
+GLOBAL_VALUE 30.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 30.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_DELAY_MARGIN
+SESSION_VALUE NULL
+GLOBAL_VALUE 1.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_EVICT
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_INACTIVE_CHECK_PERIOD
+SESSION_VALUE NULL
+GLOBAL_VALUE 0.500000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0.500000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_INACTIVE_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 15.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 15.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_INFO_LOG_MASK
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_INSTALL_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 7.500000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 7.500000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_JOIN_RETRANS_PERIOD
+SESSION_VALUE NULL
+GLOBAL_VALUE 1.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_KEEPALIVE_PERIOD
+SESSION_VALUE NULL
+GLOBAL_VALUE 1.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_MAX_INSTALL_TIMEOUTS
+SESSION_VALUE NULL
+GLOBAL_VALUE 3
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 3
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_SEND_WINDOW
+SESSION_VALUE NULL
+GLOBAL_VALUE 4
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 4
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_STATS_REPORT_PERIOD
+SESSION_VALUE NULL
+GLOBAL_VALUE 60.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 60.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_SUSPECT_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 5.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 5.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_USER_SEND_WINDOW
+SESSION_VALUE NULL
+GLOBAL_VALUE 2
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 2
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_USE_AGGREGATE
+SESSION_VALUE NULL
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_VERSION
+SESSION_VALUE NULL
+GLOBAL_VALUE 1
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_EVS_VIEW_FORGET_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 86400.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 86400.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_KEEP_PAGES_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_KEEP_PLAINTEXT_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 134217728
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 134217728
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_MEM_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_NAME
+SESSION_VALUE NULL
+GLOBAL_VALUE galera.cache
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE galera.cache
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_PAGE_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 134217728
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 134217728
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_RECOVER
+SESSION_VALUE NULL
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCACHE_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 10485760
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 10485760
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCOMM_THREAD_PRIO
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_FC_DEBUG
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_FC_FACTOR
+SESSION_VALUE NULL
+GLOBAL_VALUE 1.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_FC_LIMIT
+SESSION_VALUE NULL
+GLOBAL_VALUE 16
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 16
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_FC_MASTER_SLAVE
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_FC_SINGLE_PRIMARY
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_MAX_PACKET_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 64500
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 64500
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_MAX_THROTTLE
+SESSION_VALUE NULL
+GLOBAL_VALUE 0.250000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0.250000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_RECV_Q_HARD_LIMIT
+SESSION_VALUE NULL
+GLOBAL_VALUE 9223372036854775807
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 9223372036854775807
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_RECV_Q_SOFT_LIMIT
+SESSION_VALUE NULL
+GLOBAL_VALUE 0.250000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0.250000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_SYNC_DONOR
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GCS_VOTE_POLICY
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_GROUP
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_MCAST_ADDR
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_MCAST_TTL
+SESSION_VALUE NULL
+GLOBAL_VALUE 1
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_PEER_ADDR
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_PEER_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 3.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 3.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_SEGMENT
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_TIME_WAIT
+SESSION_VALUE NULL
+GLOBAL_VALUE 5.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 5.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_GMCAST_VERSION
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_IST_RECV_ADDR
+SESSION_VALUE NULL
+GLOBAL_VALUE 127.0.0.1
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 127.0.0.1
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_IST_RECV_BIND
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_ANNOUNCE_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 3.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 3.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_BOOTSTRAP
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_CHECKSUM
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_IGNORE_QUORUM
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_IGNORE_SB
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_LINGER
+SESSION_VALUE NULL
+GLOBAL_VALUE 20.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 20.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_NPVO
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_RECOVERY
+SESSION_VALUE NULL
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_VERSION
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_WAIT_PRIM
+SESSION_VALUE NULL
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_WAIT_PRIM_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 30.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 30.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PC_WEIGHT
+SESSION_VALUE NULL
+GLOBAL_VALUE 1
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_PROTONET_VERSION
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_REPL_CAUSAL_READ_TIMEOUT
+SESSION_VALUE NULL
+GLOBAL_VALUE 30.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 30.000000
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE 2.2250738585072e-308
+NUMERIC_MAX_VALUE 1.797693134862316e308
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_REPL_COMMIT_ORDER
+SESSION_VALUE NULL
+GLOBAL_VALUE 3
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 3
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_REPL_KEY_FORMAT
+SESSION_VALUE NULL
+GLOBAL_VALUE FLAT8
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE FLAT8
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_REPL_MAX_WS_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 2147483647
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 2147483647
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_REPL_PROTO_MAX
+SESSION_VALUE NULL
+GLOBAL_VALUE 10
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 10
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_CHECKSUM
+SESSION_VALUE NULL
+GLOBAL_VALUE 2
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 2
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BIGINT
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE -9223372036854775808
+NUMERIC_MAX_VALUE 9223372036854775807
+NUMERIC_BLOCK_SIZE 0
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_DYNAMIC
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_RECV_BUF_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE auto
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE auto
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SEND_BUF_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE auto
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE auto
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_CA
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_CERT
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_CIPHER
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_COMPRESSION
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_KEY
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_PASSWORD_FILE
+SESSION_VALUE NULL
+GLOBAL_VALUE
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE VARCHAR
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
+VARIABLE_NAME WSREP_PROVIDER_SOCKET_SSL_RELOAD
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Wsrep provider option
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+GLOBAL_VALUE_PATH NULL
diff --git a/mysql-test/suite/wsrep/r/wsrep_provider_plugin_wsrep_off.result b/mysql-test/suite/wsrep/r/wsrep_provider_plugin_wsrep_off.result
new file mode 100644
index 00000000000..b6ad7e84b95
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep_provider_plugin_wsrep_off.result
@@ -0,0 +1,5 @@
+SELECT @@wsrep_on;
+@@wsrep_on
+0
+select variable_type, global_value from information_schema.system_variables where variable_name like 'wsrep_provider%';
+variable_type global_value
diff --git a/mysql-test/suite/wsrep/t/variables_debug.test b/mysql-test/suite/wsrep/t/variables_debug.test
index 5e90d61c84e..b218586fea7 100644
--- a/mysql-test/suite/wsrep/t/variables_debug.test
+++ b/mysql-test/suite/wsrep/t/variables_debug.test
@@ -8,7 +8,7 @@
--let $galera_version=26.4.11
source include/check_galera_version.inc;
-source include/galera_variables_ok.inc;
+source include/galera_variables_ok_debug.inc;
--replace_column 2 #
SHOW GLOBAL STATUS LIKE 'wsrep%';
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin.cnf b/mysql-test/suite/wsrep/t/wsrep_provider_plugin.cnf
new file mode 100644
index 00000000000..c61599acb07
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin.cnf
@@ -0,0 +1,8 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-cluster-address=gcomm://
+wsrep-provider=@ENV.WSREP_PROVIDER
+binlog-format=ROW
+plugin-wsrep-provider=ON
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin.test b/mysql-test/suite/wsrep/t/wsrep_provider_plugin.test
new file mode 100644
index 00000000000..453e5bb0dae
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin.test
@@ -0,0 +1,41 @@
+#
+# Verify that provider system variables can be modified via the
+# wsrep_provider plugin, and that wsrep_provider/wsrep_provider_options
+# themselves cannot be modified.
+#
+
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+
+--let $galera_version=26.4.14
+source include/check_galera_version.inc;
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+
+SET GLOBAL wsrep_provider_repl_max_ws_size=1;
+SHOW VARIABLES LIKE 'wsrep_provider_repl_max_ws_size';
+
+--error ER_UNKNOWN_ERROR
+INSERT INTO t1 VALUES (1);
+
+SET GLOBAL wsrep_provider_repl_max_ws_size=DEFAULT;
+SHOW VARIABLES LIKE 'wsrep_provider_repl_max_ws_size';
+
+INSERT INTO t1 VALUES (1);
+
+# Variable should be read-only, so the assignment must not take effect
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET GLOBAL wsrep_provider_options='repl.max_ws_size=1';
+INSERT INTO t1 VALUES (2);
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET GLOBAL wsrep_provider='none';
+
+DROP TABLE t1;
+
+CALL mtr.add_suppression("transaction size limit");
+CALL mtr.add_suppression("rbr write fail");
+
+#
+# MDEV-30120: Update the wsrep_provider_options read_only value in the system_variables table.
+#
+SELECT VARIABLE_NAME,READ_ONLY FROM information_schema.system_variables where VARIABLE_NAME like '%wsrep_provider_options%';
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.cnf b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.cnf
new file mode 100644
index 00000000000..c61599acb07
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.cnf
@@ -0,0 +1,8 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-cluster-address=gcomm://
+wsrep-provider=@ENV.WSREP_PROVIDER
+binlog-format=ROW
+plugin-wsrep-provider=ON
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.test b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.test
new file mode 100644
index 00000000000..028546537c6
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_basic.test
@@ -0,0 +1,77 @@
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+
+--let $galera_version=26.4.14
+source include/check_galera_version.inc;
+
+#
+# Test string option
+#
+
+select variable_type, global_value from information_schema.system_variables where variable_name = 'wsrep_provider_socket_recv_buf_size';
+--error ER_WRONG_VALUE_FOR_VAR
+set global wsrep_provider_socket_recv_buf_size = 'foo';
+set global wsrep_provider_socket_recv_buf_size = '1M';
+show global variables like 'wsrep_provider_socket_recv_buf_size';
+set global wsrep_provider_socket_recv_buf_size = default;
+show global variables like 'wsrep_provider_socket_recv_buf_size';
+
+
+#
+# Test integer option
+#
+
+select variable_type, global_value from information_schema.system_variables where variable_name = 'wsrep_provider_evs_send_window';
+--error ER_WRONG_VALUE_FOR_VAR
+set global wsrep_provider_evs_send_window = -10;
+set global wsrep_provider_evs_send_window = 10;
+show global variables like 'wsrep_provider_evs_send_window';
+set global wsrep_provider_evs_send_window = default;
+show global variables like 'wsrep_provider_evs_send_window';
+
+
+#
+# Test double option
+#
+
+select variable_type from information_schema.system_variables where variable_name = 'wsrep_provider_gcs_max_throttle';
+--error ER_WRONG_VALUE_FOR_VAR
+set global wsrep_provider_gcs_max_throttle = 1.1;
+set global wsrep_provider_gcs_max_throttle = 0.5;
+show global variables like 'wsrep_provider_gcs_max_throttle';
+set global wsrep_provider_gcs_max_throttle = default;
+show global variables like 'wsrep_provider_gcs_max_throttle';
+
+
+#
+# Test bool option
+#
+
+select variable_type from information_schema.system_variables where variable_name = 'wsrep_provider_cert_log_conflicts';
+set global wsrep_provider_cert_log_conflicts = on;
+show global variables like 'wsrep_provider_cert_log_conflicts';
+set global wsrep_provider_cert_log_conflicts = off;
+show global variables like 'wsrep_provider_cert_log_conflicts';
+set global wsrep_provider_cert_log_conflicts = default;
+show global variables like 'wsrep_provider_cert_log_conflicts';
+
+
+#
+# Test read-only option
+#
+
+select read_only from information_schema.system_variables where variable_name = 'wsrep_provider_evs_auto_evict';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global wsrep_provider_evs_auto_evict = on;
+
+
+#
+# Test deprecated option (expect warning in result file)
+#
+
+set global wsrep_provider_gcs_fc_master_slave = default;
+
+
+call mtr.add_suppression("error setting param");
+call mtr.add_suppression("Unknown parameter");
+call mtr.add_suppression("Setting parameter");
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.cnf b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.cnf
new file mode 100644
index 00000000000..c61599acb07
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.cnf
@@ -0,0 +1,8 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-cluster-address=gcomm://
+wsrep-provider=@ENV.WSREP_PROVIDER
+binlog-format=ROW
+plugin-wsrep-provider=ON
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.test b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.test
new file mode 100644
index 00000000000..bdeef21a38a
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_defaults.test
@@ -0,0 +1,30 @@
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+
+--let $galera_version=26.4.14
+source include/check_galera_version.inc;
+
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_provider%' AND VARIABLE_NAME NOT IN (
+ 'wsrep_provider',
+ 'wsrep_provider_options',
+ 'wsrep_provider_base_dir',
+ 'wsrep_provider_base_port',
+ 'wsrep_provider_gcache_dir',
+ 'wsrep_provider_dbug',
+ 'wsrep_provider_gcache_debug',
+ 'wsrep_provider_signal',
+ 'wsrep_provider_gmcast_listen_addr');
+
+--vertical_results
+SELECT * FROM INFORMATION_SCHEMA.SYSTEM_VARIABLES
+WHERE VARIABLE_NAME LIKE 'wsrep_provider_%' AND VARIABLE_NAME NOT IN (
+ 'wsrep_provider',
+ 'wsrep_provider_options',
+ 'wsrep_provider_base_dir',
+ 'wsrep_provider_base_port',
+ 'wsrep_provider_gcache_dir',
+ 'wsrep_provider_dbug',
+ 'wsrep_provider_gcache_debug',
+ 'wsrep_provider_signal',
+ 'wsrep_provider_gmcast_listen_addr')
+ORDER BY VARIABLE_NAME;
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.cnf b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.cnf
new file mode 100644
index 00000000000..53c12cc4dab
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.cnf
@@ -0,0 +1,12 @@
+# Use default setting for mysqld processes
+!include include/default_mysqld.cnf
+
+[mysqld]
+wsrep-on=OFF
+
+[mysqld.1]
+wsrep-on=OFF
+#galera_port=@OPT.port
+#ist_port=@OPT.port
+#sst_port=@OPT.port
+plugin-wsrep-provider=ON
diff --git a/mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.test b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.test
new file mode 100644
index 00000000000..b99b87696af
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_provider_plugin_wsrep_off.test
@@ -0,0 +1,6 @@
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+
+SELECT @@wsrep_on;
+
+select variable_type, global_value from information_schema.system_variables where variable_name like 'wsrep_provider%';
diff --git a/mysys/array.c b/mysys/array.c
index 6e871ee6343..02a54d44656 100644
--- a/mysys/array.c
+++ b/mysys/array.c
@@ -92,7 +92,7 @@ my_bool init_dynamic_array2(PSI_memory_key psi_key, DYNAMIC_ARRAY *array,
my_bool insert_dynamic(DYNAMIC_ARRAY *array, const void * element)
{
void *buffer;
- if (array->elements == array->max_element)
+ if (unlikely(array->elements == array->max_element))
{ /* Call only when necessary */
if (!(buffer=alloc_dynamic(array)))
return TRUE;
@@ -102,7 +102,42 @@ my_bool insert_dynamic(DYNAMIC_ARRAY *array, const void * element)
buffer=array->buffer+(array->elements * array->size_of_element);
array->elements++;
}
- memcpy(buffer,element,(size_t) array->size_of_element);
+ memcpy(buffer, element, array->size_of_element);
+ return FALSE;
+}
+
+
+/* Fast version of appending to dynamic array */
+
+void init_append_dynamic(DYNAMIC_ARRAY_APPEND *append,
+ DYNAMIC_ARRAY *array)
+{
+ append->array= array;
+ append->pos= array->buffer + array->elements * array->size_of_element;
+ append->end= array->buffer + array->max_element * array->size_of_element;
+}
+
+
+my_bool append_dynamic(DYNAMIC_ARRAY_APPEND *append,
+ const void *element)
+{
+ DYNAMIC_ARRAY *array= append->array;
+ size_t size_of_element= array->size_of_element;
+ if (unlikely(append->pos == append->end))
+ {
+ void *buffer;
+ if (!(buffer=alloc_dynamic(array)))
+ return TRUE;
+ append->pos= (uchar*)buffer + size_of_element;
+ append->end= array->buffer + array->max_element * size_of_element;
+ memcpy(buffer, element, size_of_element);
+ }
+ else
+ {
+ array->elements++;
+ memcpy(append->pos, element, size_of_element);
+ append->pos+= size_of_element;
+ }
return FALSE;
}
@@ -281,7 +316,7 @@ my_bool allocate_dynamic(DYNAMIC_ARRAY *array, size_t max_elements)
void get_dynamic(DYNAMIC_ARRAY *array, void *element, size_t idx)
{
- if (idx >= array->elements)
+ if (unlikely(idx >= array->elements))
{
DBUG_PRINT("warning",("To big array idx: %d, array size is %d",
idx,array->elements));
@@ -306,7 +341,7 @@ void delete_dynamic(DYNAMIC_ARRAY *array)
/*
Just mark as empty if we are using a static buffer
*/
- if (!(array->malloc_flags & MY_INIT_BUFFER_USED) && array->buffer)
+ if (array->buffer && !(array->malloc_flags & MY_INIT_BUFFER_USED))
my_free(array->buffer);
array->buffer= 0;
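
The array.c hunk above adds a fast-append path for DYNAMIC_ARRAY: init_append_dynamic() caches the current write position and append_dynamic() only falls back to alloc_dynamic() when the buffer is full. A minimal usage sketch, assuming an already-initialized DYNAMIC_ARRAY of int (the element type, the helper name and the count are illustrative, not part of the patch):

/* Sketch only: appends n ints using the new fast-append helpers. */
static my_bool append_many(DYNAMIC_ARRAY *array, uint n)
{
  DYNAMIC_ARRAY_APPEND append;
  uint i;
  init_append_dynamic(&append, array);      /* cache current end position */
  for (i= 0; i < n; i++)
  {
    int value= (int) i;
    if (append_dynamic(&append, &value))    /* TRUE only if alloc_dynamic() fails */
      return TRUE;                          /* out of memory */
  }
  return FALSE;     /* array->elements is kept up to date, as with insert_dynamic() */
}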
diff --git a/mysys/errors.c b/mysys/errors.c
index d88540fe277..b522590ae7b 100644
--- a/mysys/errors.c
+++ b/mysys/errors.c
@@ -60,6 +60,7 @@ const char *globerrs[GLOBERRS]=
"Lock Pages in memory access rights required",
"Memcntl %s cmd %s error",
"Warning: Charset id '%d' csname '%s' trying to replace existing csname '%s'",
+ "Deprecated program name. It will be removed in a future release, use '%s' instead"
};
void init_glob_errs(void)
@@ -109,6 +110,7 @@ void init_glob_errs()
EE(EE_PERM_LOCK_MEMORY)= "Lock Pages in memory access rights required";
EE(EE_MEMCNTL) = "Memcntl %s cmd %s error";
EE(EE_DUPLICATE_CHARSET)= "Warning: Charset id %d trying to replace csname %s with %s";
+  EE(EE_NAME_DEPRECATED) = "Deprecated program name. It will be removed in a future release, use '%s' instead";
}
#endif
diff --git a/mysys/mf_radix.c b/mysys/mf_radix.c
index 8f044cf9b29..89a3c4ac6ce 100644
--- a/mysys/mf_radix.c
+++ b/mysys/mf_radix.c
@@ -26,7 +26,7 @@
/* Radixsort */
-my_bool radixsort_is_appliccable(uint n_items, size_t size_of_element)
+my_bool radixsort_is_applicable(uint n_items, size_t size_of_element)
{
return size_of_element <= 20 && n_items >= 1000 && n_items < 100000;
}
diff --git a/mysys/mf_sort.c b/mysys/mf_sort.c
index 24e875b813e..79bdfdd23aa 100644
--- a/mysys/mf_sort.c
+++ b/mysys/mf_sort.c
@@ -23,7 +23,7 @@ void my_string_ptr_sort(uchar *base, uint items, size_t size)
#if INT_MAX > 65536L
uchar **ptr=0;
- if (radixsort_is_appliccable(items, size) &&
+ if (radixsort_is_applicable(items, size) &&
(ptr= (uchar**) my_malloc(PSI_NOT_INSTRUMENTED,
items * sizeof(char*),MYF(0))))
{
diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c
index 9f13ca7a424..d1b2c599372 100644
--- a/mysys/my_getopt.c
+++ b/mysys/my_getopt.c
@@ -897,7 +897,9 @@ static int setval(const struct my_option *opts, void *value, char *argument,
goto ret;
};
}
+
validate_value(opts->name, argument, option_file);
+
DBUG_RETURN(0);
ret:
diff --git a/mysys/my_init.c b/mysys/my_init.c
index 44488e5848a..73d767377b4 100644
--- a/mysys/my_init.c
+++ b/mysys/my_init.c
@@ -43,6 +43,13 @@ static void setup_codepages();
#define _SC_PAGESIZE _SC_PAGE_SIZE
#endif
+#if defined(__linux__)
+#define EXE_LINKPATH "/proc/self/exe"
+#elif defined(__FreeBSD__)
+/* unfortunately, not mounted by default */
+#define EXE_LINKPATH "/proc/curproc/file"
+#endif
+
extern pthread_key(struct st_my_thread_var*, THR_KEY_mysys);
#define SCALE_SEC 100
@@ -169,15 +176,34 @@ my_bool my_init(void)
mysql_stdin= & instrumented_stdin;
my_progname_short= "unknown";
- if (my_progname)
- my_progname_short= my_progname + dirname_length(my_progname);
-
/* Initialize our mutex handling */
my_mutex_init();
if (my_thread_global_init())
return 1;
+ if (my_progname)
+ {
+ char link_name[FN_REFLEN];
+ my_progname_short= my_progname + dirname_length(my_progname);
+ /*
+ If my_progname_short doesn't start with "mariadb" but it is
+ a symlink to an actual executable that does, warn the user.
+ First try to find the actual name via /proc, but if it's unmounted
+ (which it usually is on FreeBSD) resort to my_progname
+ */
+ if (strncmp(my_progname_short, "mariadb", 7))
+ {
+ int res= 1;
+#ifdef EXE_LINKPATH
+ res= my_readlink(link_name, EXE_LINKPATH, MYF(0));
+#endif
+ if ((res == 0 || my_readlink(link_name, my_progname, MYF(0)) == 0) &&
+ strncmp(link_name + dirname_length(link_name), "mariadb", 7) == 0)
+ my_error(EE_NAME_DEPRECATED, MYF(MY_WME), link_name);
+ }
+ }
+
#if defined(SAFEMALLOC) && !defined(DBUG_OFF)
dbug_sanity= sf_sanity;
#endif
diff --git a/mysys/my_symlink.c b/mysys/my_symlink.c
index 8238e501e7f..a07f67a179f 100644
--- a/mysys/my_symlink.c
+++ b/mysys/my_symlink.c
@@ -45,7 +45,7 @@ int (*mysys_test_invalid_symlink)(const char *filename)= always_valid;
int my_readlink(char *to, const char *filename, myf MyFlags)
{
#ifndef HAVE_READLINK
- strmov(to,filename);
+ strnmov(to, filename, FN_REFLEN);
return 1;
#else
int result=0;
@@ -58,7 +58,7 @@ int my_readlink(char *to, const char *filename, myf MyFlags)
if ((my_errno=errno) == EINVAL)
{
result= 1;
- strmov(to,filename);
+ strnmov(to, filename, FN_REFLEN);
}
else
{
diff --git a/mysys/my_winfile.c b/mysys/my_winfile.c
index 35bc6b35399..7a1e3e60b12 100644
--- a/mysys/my_winfile.c
+++ b/mysys/my_winfile.c
@@ -89,17 +89,15 @@ static void invalidate_fd(File fd)
/* Get Windows handle for a file descriptor */
HANDLE my_get_osfhandle(File fd)
{
- DBUG_ENTER("my_get_osfhandle");
DBUG_ASSERT(fd >= MY_FILE_MIN && fd < (int)my_file_limit);
- DBUG_RETURN(my_file_info[fd].fhandle);
+ return (my_file_info[fd].fhandle);
}
static int my_get_open_flags(File fd)
{
- DBUG_ENTER("my_get_open_flags");
DBUG_ASSERT(fd >= MY_FILE_MIN && fd < (int)my_file_limit);
- DBUG_RETURN(my_file_info[fd].oflag);
+ return (my_file_info[fd].oflag);
}
/*
@@ -347,10 +345,8 @@ size_t my_win_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset)
OVERLAPPED ov= {0};
LARGE_INTEGER li;
- DBUG_ENTER("my_win_pread");
-
if(!Count)
- DBUG_RETURN(0);
+ return(0);
#ifdef _WIN64
if(Count > UINT_MAX)
Count= UINT_MAX;
@@ -369,11 +365,11 @@ size_t my_win_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset)
through e.g. a command pipe in windows : see MSDN on ReadFile.
*/
if(lastError == ERROR_HANDLE_EOF || lastError == ERROR_BROKEN_PIPE)
- DBUG_RETURN(0); /*return 0 at EOF*/
+ return(0); /*return 0 at EOF*/
my_osmaperr(lastError);
- DBUG_RETURN((size_t)-1);
+ return((size_t)-1);
}
- DBUG_RETURN(nBytesRead);
+ return(nBytesRead);
}
@@ -382,9 +378,8 @@ size_t my_win_read(File Filedes, uchar *Buffer, size_t Count)
DWORD nBytesRead;
HANDLE hFile;
- DBUG_ENTER("my_win_read");
if(!Count)
- DBUG_RETURN(0);
+ return(0);
#ifdef _WIN64
if(Count > UINT_MAX)
Count= UINT_MAX;
@@ -400,11 +395,11 @@ size_t my_win_read(File Filedes, uchar *Buffer, size_t Count)
through e.g. a command pipe in windows : see MSDN on ReadFile.
*/
if(lastError == ERROR_HANDLE_EOF || lastError == ERROR_BROKEN_PIPE)
- DBUG_RETURN(0); /*return 0 at EOF*/
+ return(0); /*return 0 at EOF*/
my_osmaperr(lastError);
- DBUG_RETURN((size_t)-1);
+ return((size_t)-1);
}
- DBUG_RETURN(nBytesRead);
+ return(nBytesRead);
}
@@ -416,12 +411,8 @@ size_t my_win_pwrite(File Filedes, const uchar *Buffer, size_t Count,
OVERLAPPED ov= {0};
LARGE_INTEGER li;
- DBUG_ENTER("my_win_pwrite");
- DBUG_PRINT("my",("Filedes: %d, Buffer: %p, Count: %llu, offset: %llu",
- Filedes, Buffer, (ulonglong)Count, (ulonglong)offset));
-
if(!Count)
- DBUG_RETURN(0);
+ return(0);
#ifdef _WIN64
if(Count > UINT_MAX)
@@ -436,10 +427,10 @@ size_t my_win_pwrite(File Filedes, const uchar *Buffer, size_t Count,
if(!WriteFile(hFile, Buffer, (DWORD)Count, &nBytesWritten, &ov))
{
my_osmaperr(GetLastError());
- DBUG_RETURN((size_t)-1);
+ return((size_t)-1);
}
else
- DBUG_RETURN(nBytesWritten);
+ return(nBytesWritten);
}
@@ -448,11 +439,9 @@ my_off_t my_win_lseek(File fd, my_off_t pos, int whence)
LARGE_INTEGER offset;
LARGE_INTEGER newpos;
- DBUG_ENTER("my_win_lseek");
-
/* Check compatibility of Windows and Posix seek constants */
- compile_time_assert(FILE_BEGIN == SEEK_SET && FILE_CURRENT == SEEK_CUR
- && FILE_END == SEEK_END);
+ compile_time_assert(FILE_BEGIN == SEEK_SET && FILE_CURRENT == SEEK_CUR &&
+ FILE_END == SEEK_END);
offset.QuadPart= pos;
if(!SetFilePointerEx(my_get_osfhandle(fd), offset, &newpos, whence))
@@ -460,7 +449,7 @@ my_off_t my_win_lseek(File fd, my_off_t pos, int whence)
my_osmaperr(GetLastError());
newpos.QuadPart= -1;
}
- DBUG_RETURN(newpos.QuadPart);
+ return(newpos.QuadPart);
}
@@ -474,12 +463,8 @@ size_t my_win_write(File fd, const uchar *Buffer, size_t Count)
OVERLAPPED *pov= NULL;
HANDLE hFile;
- DBUG_ENTER("my_win_write");
- DBUG_PRINT("my",("Filedes: %d, Buffer: %p, Count %llu", fd, Buffer,
- (ulonglong)Count));
-
if(!Count)
- DBUG_RETURN(0);
+ return(0);
#ifdef _WIN64
if(Count > UINT_MAX)
@@ -502,9 +487,9 @@ size_t my_win_write(File fd, const uchar *Buffer, size_t Count)
if(!WriteFile(hFile, Buffer, (DWORD)Count, &nWritten, pov))
{
my_osmaperr(GetLastError());
- DBUG_RETURN((size_t)-1);
+ return((size_t)-1);
}
- DBUG_RETURN(nWritten);
+ return(nWritten);
}
@@ -512,7 +497,6 @@ int my_win_chsize(File fd, my_off_t newlength)
{
HANDLE hFile;
LARGE_INTEGER length;
- DBUG_ENTER("my_win_chsize");
hFile= (HANDLE) my_get_osfhandle(fd);
length.QuadPart= newlength;
@@ -520,11 +504,11 @@ int my_win_chsize(File fd, my_off_t newlength)
goto err;
if (!SetEndOfFile(hFile))
goto err;
- DBUG_RETURN(0);
+ return(0);
err:
my_osmaperr(GetLastError());
my_errno= errno;
- DBUG_RETURN(-1);
+ return(-1);
}
@@ -555,7 +539,6 @@ File my_win_handle2File(HANDLE hFile)
{
int retval= -1;
uint i;
-
DBUG_ENTER("my_win_handle2File");
for(i= MY_FILE_MIN; i < my_file_limit; i++)
@@ -623,7 +606,6 @@ FILE * my_win_fdopen(File fd, const char *type)
FILE *file;
int crt_fd;
int flags= 0;
-
DBUG_ENTER("my_win_fdopen");
if(strchr(type,'a') != NULL)
@@ -641,8 +623,8 @@ FILE * my_win_fdopen(File fd, const char *type)
int my_win_fclose(FILE *file)
{
File fd;
-
DBUG_ENTER("my_win_fclose");
+
fd= my_fileno(file);
if(fd < 0)
DBUG_RETURN(-1);
@@ -665,7 +647,6 @@ int my_win_fstat(File fd, struct _stati64 *buf)
int crt_fd;
int retval;
HANDLE hFile, hDup;
-
DBUG_ENTER("my_win_fstat");
hFile= my_get_osfhandle(fd);
diff --git a/plugin/aws_key_management/aws_key_management_plugin.cc b/plugin/aws_key_management/aws_key_management_plugin.cc
index 7740c2eae60..496c7704517 100644
--- a/plugin/aws_key_management/aws_key_management_plugin.cc
+++ b/plugin/aws_key_management/aws_key_management_plugin.cc
@@ -82,6 +82,7 @@ static unsigned long log_level;
static int rotate_key;
static int request_timeout;
static char* endpoint_url;
+static char* keyfile_dir;
#ifndef DBUG_OFF
#define WITH_AWS_MOCK 1
@@ -187,13 +188,23 @@ protected:
}
};
-/* Get list of files in current directory */
-static vector<string> traverse_current_directory()
+/* Get keyfile directory */
+static const char * get_keyfile_dir()
+{
+ if (keyfile_dir && keyfile_dir[0])
+ return keyfile_dir;
+ return ".";
+}
+
+/* Get list of files in keyfile directory */
+static vector<string> traverse_keyfile_directory()
{
vector<string> v;
#ifdef _WIN32
WIN32_FIND_DATA find_data;
- HANDLE h= FindFirstFile("*.*", &find_data);
+ char path[FN_REFLEN];
+ snprintf(path, sizeof(path), "%s\\*.*", get_keyfile_dir());
+ HANDLE h= FindFirstFile(path, &find_data);
if (h == INVALID_HANDLE_VALUE)
return v;
do
@@ -203,7 +214,7 @@ static vector<string> traverse_current_directory()
while (FindNextFile(h, &find_data));
FindClose(h);
#else
- DIR *dir = opendir(".");
+ DIR *dir = opendir(get_keyfile_dir());
if (!dir)
return v;
struct dirent *e;
@@ -272,7 +283,7 @@ static int plugin_init(void *p)
if (init())
return -1;
- vector<string> files= traverse_current_directory();
+ vector<string> files= traverse_keyfile_directory();
for (size_t i=0; i < files.size(); i++)
{
@@ -316,7 +327,7 @@ static int plugin_deinit(void *p)
/* Generate filename to store the ciphered key */
static void format_keyfile_name(char *buf, size_t size, uint key_id, uint version)
{
- snprintf(buf, size, "aws-kms-key.%u.%u", key_id, version);
+ snprintf(buf, size, "%s%saws-kms-key.%u.%u", get_keyfile_dir(), IF_WIN("\\","/"), key_id, version);
}
/* Extract key id and version from file name */
@@ -336,7 +347,7 @@ static int extract_id_and_version(const char *name, uint *id, uint *ver)
static int load_key(KEY_INFO *info)
{
int ret;
- char path[256];
+ char path[FN_REFLEN];
format_keyfile_name(path, sizeof(path), info->key_id, info->key_version);
ret= read_and_decrypt_key(path, info);
@@ -531,7 +542,7 @@ static int generate_and_save_datakey(uint keyid, uint version)
return -1;
string out;
- char filename[20];
+ char filename[FN_REFLEN];
format_keyfile_name(filename, sizeof(filename), keyid, version);
int fd= open(filename, O_WRONLY |O_CREAT|O_BINARY, IF_WIN(_S_IREAD, S_IRUSR| S_IRGRP| S_IROTH));
if (fd < 0)
@@ -652,7 +663,6 @@ static unsigned int get_key(
return(0);
}
-
/* Plugin defs */
struct st_mariadb_encryption aws_key_management_plugin= {
MariaDB_ENCRYPTION_INTERFACE_VERSION,
@@ -725,6 +735,12 @@ static MYSQL_SYSVAR_STR(endpoint_url, endpoint_url,
"Used to override the default AWS API endpoint. If not set, the default will be used",
NULL, NULL, "");
+static MYSQL_SYSVAR_STR(keyfile_dir, keyfile_dir,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Define the directory in which to save key files for the AWS key"
+ "management plugin. If not set, the root datadir will be used",
+ NULL, NULL, "");
+
#if WITH_AWS_MOCK
static MYSQL_SYSVAR_BOOL(mock, mock,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
@@ -740,6 +756,7 @@ static struct st_mysql_sys_var* settings[]= {
MYSQL_SYSVAR(request_timeout),
MYSQL_SYSVAR(region),
MYSQL_SYSVAR(endpoint_url),
+ MYSQL_SYSVAR(keyfile_dir),
#if WITH_AWS_MOCK
MYSQL_SYSVAR(mock),
#endif
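
The aws_key_management hunk above adds a read-only keyfile_dir system variable so ciphered key files no longer have to live in the server's working directory. A hypothetical configuration fragment follows; the option name assumes the usual plugin-prefix naming convention and the path is an example, neither is taken from the patch:

[mysqld]
# assumed option name; the directory must exist and be writable by the server
aws-key-management-keyfile-dir=/var/lib/mysql/aws-keys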
diff --git a/plugin/hashicorp_key_management/mysql-test/vault/t/hashicorp_key_rotation_age.test b/plugin/hashicorp_key_management/mysql-test/vault/t/hashicorp_key_rotation_age.test
index ce99406ab06..c446036aaae 100644
--- a/plugin/hashicorp_key_management/mysql-test/vault/t/hashicorp_key_rotation_age.test
+++ b/plugin/hashicorp_key_management/mysql-test/vault/t/hashicorp_key_rotation_age.test
@@ -33,7 +33,7 @@ let $restart_parameters=$default_parameters --innodb_encryption_threads=5 --inno
--echo # Wait until encryption threads have encrypted all tablespaces
---let $tables_count= `select count(*) + 1 from information_schema.tables where engine = 'InnoDB'`
+--let $tables_count= `select count(*) + 1 + @@global.innodb_undo_tablespaces from information_schema.tables where engine = 'InnoDB'`
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
@@ -63,7 +63,7 @@ alter table t33 encryption_key_id=222;
--echo # Wait until encryption threads have encrypted all tablespaces
---let $tables_count= `select count(*) from information_schema.tables where engine = 'InnoDB'`
+--let $tables_count= `select count(*) + @@global.innodb_undo_tablespaces from information_schema.tables where engine = 'InnoDB'`
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
@@ -77,7 +77,7 @@ set global innodb_encrypt_tables = OFF;
--echo # Wait until encryption threads to decrypt all encrypted tablespaces
---let $tables_count= `select count(*) - 1 from information_schema.tables where engine = 'InnoDB'`
+--let $tables_count= `select count(*) - 1 + @@global.innodb_undo_tablespaces from information_schema.tables where engine = 'InnoDB'`
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND ROTATING_OR_FLUSHING = 0;
--source include/wait_condition.inc
@@ -105,7 +105,7 @@ set global innodb_encrypt_tables = ON;
--echo # Wait until encryption threads to encrypt all unencrypted tablespaces
---let $tables_count= `select count(*) from information_schema.tables where engine = 'InnoDB'`
+--let $tables_count= `select count(*) + @@global.innodb_undo_tablespaces from information_schema.tables where engine = 'InnoDB'`
--let $wait_timeout= 600
--let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0;
--source include/wait_condition.inc
diff --git a/plugin/type_inet/mysql-test/type_inet/type_inet4.result b/plugin/type_inet/mysql-test/type_inet/type_inet4.result
index 7763b28e1fb..30cb6f7fe94 100644
--- a/plugin/type_inet/mysql-test/type_inet/type_inet4.result
+++ b/plugin/type_inet/mysql-test/type_inet/type_inet4.result
@@ -1337,7 +1337,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN SELECT * FROM t1 WHERE b IN (SELECT a AS a_inner FROM t1 GROUP BY a_inner);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery a a 5 func 1 Using index; Using where
SET @@optimizer_switch=DEFAULT;
DROP TABLE t1;
#
diff --git a/plugin/type_inet/mysql-test/type_inet/type_inet6.result b/plugin/type_inet/mysql-test/type_inet/type_inet6.result
index 8d52e85cb91..b399400ab87 100644
--- a/plugin/type_inet/mysql-test/type_inet/type_inet6.result
+++ b/plugin/type_inet/mysql-test/type_inet/type_inet6.result
@@ -1330,7 +1330,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN SELECT * FROM t1 WHERE b IN (SELECT a AS a_inner FROM t1 GROUP BY a_inner);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery a a 17 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery a a 17 func 1 Using index; Using where
SET @@optimizer_switch=DEFAULT;
DROP TABLE t1;
#
diff --git a/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result b/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result
index ab30c262148..b3955d09735 100644
--- a/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result
+++ b/plugin/type_uuid/mysql-test/type_uuid/type_uuid.result
@@ -2524,7 +2524,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN SELECT * FROM t1 WHERE b IN (SELECT a AS a_inner FROM t1 GROUP BY a_inner);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 DEPENDENT SUBQUERY t1 index_subquery a a 17 func 2 Using index; Using where
+2 DEPENDENT SUBQUERY t1 index_subquery a a 17 func 1 Using index; Using where
SET @@optimizer_switch=DEFAULT;
DROP TABLE t1;
#
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index 9eec793c9fb..7f9c92f1cf2 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -223,7 +223,7 @@ IF(UNIX AND NOT WITHOUT_SERVER)
DESTINATION ${INSTALL_SCRIPTDIR}
COMPONENT Server)
- INSTALL_LINK(mariadb-install-db mysql_install_db ${INSTALL_SCRIPTDIR} Server)
+ INSTALL_LINK(mariadb-install-db mysql_install_db ${INSTALL_SCRIPTDIR} ServerSymlinks)
ENDIF()
SET(prefix "${CMAKE_INSTALL_PREFIX}")
@@ -385,7 +385,7 @@ ELSE()
# Create symlink
IF (NOT ${binname} STREQUAL ${file})
- INSTALL_LINK(${file} ${binname} ${INSTALL_BINDIR} ${${file}_COMPONENT})
+ INSTALL_LINK(${file} ${binname} ${INSTALL_BINDIR} ${${file}_COMPONENT}Symlinks)
ENDIF()
ENDFOREACH()
ENDIF()
diff --git a/scripts/convert-debug-for-diff.sh b/scripts/convert-debug-for-diff.sh
index 5b3ce05b815..3a22c74555d 100755
--- a/scripts/convert-debug-for-diff.sh
+++ b/scripts/convert-debug-for-diff.sh
@@ -18,8 +18,28 @@
while (<>)
{
- s/^T@[0-9]+\s*://g;
- s/0x[0-9a-f]+(\s|\n|\))/#$1/g;
- s/size: [0-9]+/size: #/g;
+ s/^T@[0-9]+ *://g;
+ s/0x[0-9a-f]+(\s|\n|\)|=|,|;)/#$1/g;
+ s/bitmap: [0-9a-fA-F]+$/bitmap: #/g;
+ s/size: [0-9-]+/size: #/g;
+ s/memory_used: [0-9]+/memory_used: #/g;
+ s/memory_used: -[0-9]+/memory_used: #/g;
+ s/Total alloc: [0-9]+/Total alloc: #/g;
+ s/(proc_info: )(.*:)[\d]+ /$1 /;
+ s/(select_cond.*) at line.*/$1/;
+ s/\(id: \d+ -> \d+\)/id: #->#/g;
+ s/(exit: found key at )\d+/$1#/g;
+ s/enter_stage: (.* at).*/enter_stage $1 ../g;
+ s/crc: [0-9]+/crc: #/g;
+ s/ref_count: [0-9]+/ref_count: #/g;
+ s/block: # \(\d+\)/block: # (#)/g;
+ s/delete_mutex: # mutex: # \(id: \d+ \<\- \d+\)/delete_mutex: # mutex: # (id: # <- #)/g;
+ s/ShortTrID: [0-9]+/ShortTrID: #/g;
+ s/timestamp:[0-9]+/timestamp:#/g;
+ s/#sql_.*_(\d+)/#sql_xxx_$1/g;
+ s/fd: [0-9]+/fd: #/g;
+ s/query_id: (\d+)/query_id: #/g;
+ s|: .*/mysql-test/var/tmp/mysqld\.\d|: var/tmp/mysqld|g;
+ s|: .*\\mysql-test\\var\\tmp\\mysqld\.\d|: var/tmp/mysqld|g;
print $_;
}
diff --git a/scripts/fill_help_tables.sql b/scripts/fill_help_tables.sql
index f50512a449d..1ce2d583b2b 100644
--- a/scripts/fill_help_tables.sql
+++ b/scripts/fill_help_tables.sql
@@ -85,7 +85,7 @@ insert into help_category (help_category_id,name,parent_category_id,url) values
insert into help_category (help_category_id,name,parent_category_id,url) values (50,'Prepared Statements',1,'');
insert into help_topic (help_topic_id,help_category_id,name,description,example,url) values (1,9,'HELP_DATE','Help Contents generated from the MariaDB Knowledge Base on 10 February 2023.','','');
-insert into help_topic (help_topic_id,help_category_id,name,description,example,url) values (2,9,'HELP_VERSION','Help Contents generated for MariaDB 10.11 from the MariaDB Knowledge Base on 10 February 2023.','','');
+insert into help_topic (help_topic_id,help_category_id,name,description,example,url) values (2,9,'HELP_VERSION','Help Contents generated for MariaDB 11.0 from the MariaDB Knowledge Base on 10 February 2023.','','');
insert into help_topic (help_topic_id,help_category_id,name,description,example,url) values (3,2,'AREA','A synonym for ST_AREA.\n\nURL: https://mariadb.com/kb/en/polygon-properties-area/','','https://mariadb.com/kb/en/polygon-properties-area/');
insert into help_topic (help_topic_id,help_category_id,name,description,example,url) values (4,2,'CENTROID','A synonym for ST_CENTROID.\n\nURL: https://mariadb.com/kb/en/centroid/','','https://mariadb.com/kb/en/centroid/');
insert into help_topic (help_topic_id,help_category_id,name,description,example,url) values (5,2,'ExteriorRing','A synonym for ST_ExteriorRing.\n\nURL: https://mariadb.com/kb/en/polygon-properties-exteriorring/','','https://mariadb.com/kb/en/polygon-properties-exteriorring/');
diff --git a/scripts/mysql_convert_table_format.sh b/scripts/mysql_convert_table_format.sh
index 6b4d758a513..c3a719e4543 100644
--- a/scripts/mysql_convert_table_format.sh
+++ b/scripts/mysql_convert_table_format.sh
@@ -19,6 +19,9 @@
use DBI;
use Getopt::Long;
+warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-convert-table-format' instead\n"
+ if $0 =~ m/mysql_convert_table_format$/;
+
$opt_help=$opt_version=$opt_verbose=$opt_force=0;
$opt_user=$opt_database=$opt_password=undef;
$opt_host="localhost";
diff --git a/scripts/mysql_find_rows.sh b/scripts/mysql_find_rows.sh
index 09bcc22790a..a54ceee4071 100644
--- a/scripts/mysql_find_rows.sh
+++ b/scripts/mysql_find_rows.sh
@@ -18,6 +18,9 @@ $version="1.02";
use Getopt::Long;
+warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-find-rows' instead\n"
+ if $0 =~ m/mysql_find_rows$/;
+
$opt_help=$opt_Information=$opt_skip_use_db=0;
$opt_regexp=$opt_dbregexp=".*";
$opt_start_row=1; $opt_rows=9999999999;
diff --git a/scripts/mysql_fix_extensions.sh b/scripts/mysql_fix_extensions.sh
index c0de813ef1f..f4e2c260f82 100644
--- a/scripts/mysql_fix_extensions.sh
+++ b/scripts/mysql_fix_extensions.sh
@@ -25,6 +25,8 @@
# makes .frm lowercase and .MYI/MYD/ISM/ISD uppercase
# useful when datafiles are copied from windows
+print STDERR "WARNING: This script is deprecated and will be removed in a future release\n\n";
+
die "Usage: $0 datadir\n" unless -d $ARGV[0];
for $a (<$ARGV[0]/*/*.*>) { $_=$a;
diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh
index 065aa7bd3fe..5fc60567e11 100644
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@ -47,6 +47,12 @@ extra_file=""
dirname0=`dirname $0 2>/dev/null`
dirname0=`dirname $dirname0 2>/dev/null`
+case "$0" in
+ *mysql_install_db)
+ echo "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-install-db' instead" 1>&2
+ ;;
+esac
+
usage()
{
cat <<EOF
@@ -77,7 +83,7 @@ Usage: $0 [OPTIONS]
--defaults-group-suffix=name
In addition to the given groups, read also groups with
this suffix
- --force Causes mysql_install_db to run even if DNS does not
+ --force Causes mariadb-install-db to run even if DNS does not
work. In that case, grant table entries that
normally use hostnames will use IP addresses.
--help Display this help and exit.
@@ -123,7 +129,7 @@ s_echo()
link_to_help()
{
echo
- echo "The latest information about mysql_install_db is available at"
+ echo "The latest information about mariadb-install-db is available at"
echo "https://mariadb.com/kb/en/installing-system-tables-mysql_install_db"
}
@@ -537,7 +543,7 @@ fi
if test -f "$ldata/mysql/user.frm"
then
echo "mysql.user table already exists!"
- echo "Run mysql_upgrade, not mysql_install_db"
+ echo "Run mariadb-upgrade, not mariadb-install-db"
exit 0
fi
@@ -609,7 +615,7 @@ cat_sql()
s_echo "Installing MariaDB/MySQL system tables in '$ldata' ..."
if cat_sql | eval "$filter_cmd_line" | mysqld_install_cmd_line > /dev/null
then
- printf "@VERSION@-MariaDB" > "$ldata/mysql_upgrade_info"
+ printf "@VERSION@-MariaDB" > "$ldata/mariadb_upgrade_info"
s_echo "OK"
else
log_file_place=$ldata
@@ -690,7 +696,7 @@ then
echo "You can start the MariaDB daemon with:"
echo "cd '$basedir' ; $bindir/mariadb-safe --datadir='$ldata'"
echo
- echo "You can test the MariaDB daemon with mysql-test-run.pl"
+ echo "You can test the MariaDB daemon with mariadb-test-run.pl"
echo "cd '$basedir/@INSTALL_MYSQLTESTDIR@' ; perl mariadb-test-run.pl"
fi
diff --git a/scripts/mysql_secure_installation.sh b/scripts/mysql_secure_installation.sh
index 40d8e5d330e..06229cf3d25 100644
--- a/scripts/mysql_secure_installation.sh
+++ b/scripts/mysql_secure_installation.sh
@@ -29,6 +29,12 @@ defaults_file=
defaults_extra_file=
no_defaults=
+case "$0" in
+ *mysql_secure_installation)
+ echo "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-secure-installation' instead" 1>&2
+ ;;
+esac
+
parse_arg()
{
echo "$1" | sed -e 's/^[^=]*=//'
diff --git a/scripts/mysql_setpermission.sh b/scripts/mysql_setpermission.sh
index b3c9c27ca88..76c07b6816b 100644
--- a/scripts/mysql_setpermission.sh
+++ b/scripts/mysql_setpermission.sh
@@ -55,6 +55,9 @@ use vars qw($dbh $sth $hostname $opt_user $opt_password $opt_help $opt_host
my $sqlhost = "";
my $user = "";
+warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-setpermission' instead\n"
+ if $0 =~ m/mysql_setpermission$/;
+
$dbh=$host=$opt_user= $opt_password= $opt_help= $opt_host= $opt_socket= "";
$opt_port=3306;
diff --git a/scripts/mysqlaccess.sh b/scripts/mysqlaccess.sh
index c9b1b72dc21..71f77cf8a11 100644
--- a/scripts/mysqlaccess.sh
+++ b/scripts/mysqlaccess.sh
@@ -32,6 +32,8 @@ BEGIN {
$script = 'MySQLAccess' unless $script;
$script_conf = "$script.conf";
$script_log = $ENV{'HOME'}."/$script.log";
+ warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-access' instead\n"
+ if $0 =~ m/mysqlaccess$/;
# ****************************
# information on MariaDB
diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh
index 9a1f3920f67..4e774e3c5ee 100644
--- a/scripts/mysqld_multi.sh
+++ b/scripts/mysqld_multi.sh
@@ -50,6 +50,8 @@ $homedir = $ENV{HOME};
$my_progname = $0;
$my_progname =~ s/.*[\/]//;
+warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-multi' instead\n"
+ if $0 =~ m/mysqld_multi$/;
if (defined($ENV{UMASK})) {
my $UMASK = $ENV{UMASK};
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index 34a2de1c119..0802cf0614a 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -40,6 +40,12 @@ syslog_tag_mysqld_safe=mysqld_safe
trap '' 1 2 3 15 # we shouldn't let anyone kill us
+case "$0" in
+ *mysqld_safe)
+ echo "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-safe' instead" 1>&2
+ ;;
+esac
+
# MySQL-specific environment variable. First off, it's not really a umask,
# it's the desired mode. Second, it follows umask(2), not umask(3) in that
# octal needs to be explicit. Our shell might be a proper sh without printf,
diff --git a/scripts/mysqldumpslow.sh b/scripts/mysqldumpslow.sh
index 5c46587e9e4..582f5c404fb 100644
--- a/scripts/mysqldumpslow.sh
+++ b/scripts/mysqldumpslow.sh
@@ -26,6 +26,9 @@
use strict;
use Getopt::Long;
+warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-dumpslow' instead\n"
+ if $0 =~ m/mysqldumpslow$/;
+
# t=time, l=lock time, r=rows, a=rows affected
# at, al, ar and aa are the corresponding averages
diff --git a/scripts/mysqlhotcopy.sh b/scripts/mysqlhotcopy.sh
index 44abcfec055..a4445612beb 100644
--- a/scripts/mysqlhotcopy.sh
+++ b/scripts/mysqlhotcopy.sh
@@ -27,6 +27,9 @@ use Sys::Hostname;
use File::Copy;
use File::Temp qw(tempfile);
+warn "$0: Deprecated program name. It will be removed in a future release, use 'mariadb-hotcopy' instead\n"
+ if $0 =~ m/mysqlhotcopy$/;
+
=head1 NAME
mysqlhotcopy - fast on-line hot-backup utility for local MySQL databases and tables
diff --git a/scripts/sys_schema/CMakeLists.txt b/scripts/sys_schema/CMakeLists.txt
index ccb268cc4fd..dc023174fc7 100644
--- a/scripts/sys_schema/CMakeLists.txt
+++ b/scripts/sys_schema/CMakeLists.txt
@@ -130,6 +130,7 @@ ${CMAKE_CURRENT_SOURCE_DIR}/views/p_s/session_ssl_status.sql
${CMAKE_CURRENT_SOURCE_DIR}/procedures/create_synonym_db.sql
${CMAKE_CURRENT_SOURCE_DIR}/procedures/execute_prepared_stmt.sql
${CMAKE_CURRENT_SOURCE_DIR}/procedures/diagnostics.sql
+${CMAKE_CURRENT_SOURCE_DIR}/procedures/optimizer_switch.sql
${CMAKE_CURRENT_SOURCE_DIR}/procedures/ps_statement_avg_latency_histogram.sql
${CMAKE_CURRENT_SOURCE_DIR}/procedures/ps_trace_statement_digest.sql
${CMAKE_CURRENT_SOURCE_DIR}/procedures/ps_trace_thread.sql
diff --git a/scripts/sys_schema/procedures/optimizer_switch.sql b/scripts/sys_schema/procedures/optimizer_switch.sql
new file mode 100644
index 00000000000..febeabc1208
--- /dev/null
+++ b/scripts/sys_schema/procedures/optimizer_switch.sql
@@ -0,0 +1,69 @@
+-- Copyright (C) 2023, MariaDB
+--
+-- This program is free software; you can redistribute it and/or modify
+-- it under the terms of the GNU General Public License as published by
+-- the Free Software Foundation; version 2 of the License.
+--
+-- This program is distributed in the hope that it will be useful,
+-- but WITHOUT ANY WARRANTY; without even the implied warranty of
+-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-- GNU General Public License for more details.
+--
+-- You should have received a copy of the GNU General Public License
+-- along with this program; if not, write to the Free Software
+-- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+DROP PROCEDURE IF EXISTS optimizer_switch_choice;
+DROP PROCEDURE IF EXISTS optimizer_switch_on;
+DROP PROCEDURE IF EXISTS optimizer_switch_off;
+DELIMITER $$
+
+CREATE DEFINER='mariadb.sys'@'localhost' PROCEDURE optimizer_switch_choice(IN on_off VARCHAR(3))
+COMMENT 'return @@optimizer_switch options as a result set for easier readability'
+SQL SECURITY INVOKER
+NOT DETERMINISTIC
+CONTAINS SQL
+BEGIN
+ DECLARE tmp VARCHAR(1024);
+ DECLARE opt VARCHAR(1024);
+ DECLARE start INT;
+ DECLARE end INT;
+ DECLARE pos INT;
+ set tmp=concat(@@optimizer_switch,",");
+ CREATE OR REPLACE TEMPORARY TABLE tmp_opt_switch (a varchar(64), opt CHAR(3)) character set latin1 engine=heap;
+ set start=1;
+ FIND_OPTIONS:
+ LOOP
+ set pos= INSTR(SUBSTR(tmp, start), ",");
+ if (pos = 0) THEN
+ LEAVE FIND_OPTIONS;
+ END IF;
+ set opt= MID(tmp, start, pos-1);
+ set end= INSTR(opt, "=");
+ insert into tmp_opt_switch values(LEFT(opt,end-1),SUBSTR(opt,end+1));
+ set start=start + pos;
+ END LOOP;
+ SELECT t.a as "option",t.opt from tmp_opt_switch as t where t.opt = on_off order by a;
+ DROP TEMPORARY TABLE tmp_opt_switch;
+END$$
+
+CREATE DEFINER='mariadb.sys'@'localhost' PROCEDURE optimizer_switch_on()
+COMMENT 'return @@optimizer_switch options that are on'
+SQL SECURITY INVOKER
+NOT DETERMINISTIC
+CONTAINS SQL
+BEGIN
+ call optimizer_switch_choice("on");
+END$$
+
+CREATE DEFINER='mariadb.sys'@'localhost' PROCEDURE optimizer_switch_off()
+COMMENT 'return @@optimizer_switch options that are off'
+SQL SECURITY INVOKER
+NOT DETERMINISTIC
+CONTAINS SQL
+BEGIN
+ call optimizer_switch_choice("off");
+END$$
+
+DELIMITER ;
+
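The procedure above turns the comma-terminated @@optimizer_switch value into rows: each loop iteration takes the text up to the next ',', splits it at '=', and stores the name/state pair; the two wrappers then filter for 'on' or 'off' (presumably invoked as CALL sys.optimizer_switch_on(); once the sys schema is installed). A minimal stand-alone sketch of the same parsing loop, using a shortened sample value instead of the real variable:

#include <iostream>
#include <sstream>
#include <string>

int main()
{
  // Shortened stand-in for @@optimizer_switch; the real value is much longer.
  std::string opt_switch=
      "index_merge=on,index_merge_union=on,mrr=off,rowid_filter=on";
  std::string wanted= "off";            // what optimizer_switch_off() asks for

  std::stringstream ss(opt_switch);
  std::string token;
  while (std::getline(ss, token, ','))  // pos= INSTR(SUBSTR(tmp, start), ",")
  {
    std::size_t eq= token.find('=');    // end= INSTR(opt, "=")
    if (eq == std::string::npos)
      continue;
    if (token.substr(eq + 1) == wanted) // keep only the requested state
      std::cout << token.substr(0, eq) << '\t' << wanted << '\n';
  }
  return 0;
}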
diff --git a/scripts/wsrep_sst_mysqldump.sh b/scripts/wsrep_sst_mysqldump.sh
index 82d8d4edd71..5e4a193b747 100644
--- a/scripts/wsrep_sst_mysqldump.sh
+++ b/scripts/wsrep_sst_mysqldump.sh
@@ -40,7 +40,7 @@ then
fi
# Check client version
-if ! $MYSQL_CLIENT --version | grep -q -E 'Distrib 10\.[1-9]'; then
+if ! $MYSQL_CLIENT --version | grep -q -E '(Distrib 10\.[1-9])|( from 1[1-9]\.)'; then
$MYSQL_CLIENT --version >&2
wsrep_log_error "this operation requires MySQL client version 10.1 or newer"
exit $EINVAL
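The extended pattern above is what decides whether the installed client is recent enough for the SST: it must either print the old 'Distrib 10.x' banner or the newer '... from 11.x' style banner used by MariaDB 11 clients. A self-contained sketch of the same check; the sample version strings are assumptions for illustration, not taken from the patch:

#include <iostream>
#include <regex>
#include <string>

static bool client_version_ok(const std::string &version_line)
{
  // Same alternation as the grep -E pattern in the hunk above.
  static const std::regex pattern("(Distrib 10\\.[1-9])|( from 1[1-9]\\.)");
  return std::regex_search(version_line, pattern);
}

int main()
{
  std::cout << client_version_ok("mysql  Ver 15.1 Distrib 10.11.2-MariaDB")
            << '\n';                                          // 1 (accepted)
  std::cout << client_version_ok("mariadb from 11.0.1-MariaDB, client 15.2")
            << '\n';                                          // 1 (accepted)
  std::cout << client_version_ok("mysql  Ver 14.14 Distrib 5.7.41")
            << '\n';                                          // 0 (rejected)
  return 0;
}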
diff --git a/sql-common/client.c b/sql-common/client.c
index 916abdea190..f58601055f0 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -3070,9 +3070,10 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
mysql->port=port;
/*
- remove the rpl hack from the version string,
- see RPL_VERSION_HACK comment
+ remove the rpl hack from the version string, in case we're connecting
+ to a pre-11.0 server
*/
+#define RPL_VERSION_HACK "5.5.5-"
if ((mysql->server_capabilities & CLIENT_PLUGIN_AUTH) &&
strncmp(mysql->server_version, RPL_VERSION_HACK,
sizeof(RPL_VERSION_HACK) - 1) == 0)
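For background: servers older than 11.0 report their version as '5.5.5-<real version>' (the replication version hack), and the client strips that prefix to expose the real version; an 11.0 server no longer sends the prefix, so the strip only fires against older servers. A simplified stand-alone illustration of the prefix handling, not the client's actual code path:

#include <cstdio>
#include <cstring>

#define RPL_VERSION_HACK "5.5.5-"

static const char *strip_rpl_hack(const char *server_version)
{
  // Drop the "5.5.5-" prefix if present, otherwise return the string as-is.
  if (strncmp(server_version, RPL_VERSION_HACK,
              sizeof(RPL_VERSION_HACK) - 1) == 0)
    return server_version + sizeof(RPL_VERSION_HACK) - 1;
  return server_version;
}

int main()
{
  printf("%s\n", strip_rpl_hack("5.5.5-10.6.12-MariaDB"));  // 10.6.12-MariaDB
  printf("%s\n", strip_rpl_hack("11.0.1-MariaDB"));         // 11.0.1-MariaDB
  return 0;
}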
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 89b0bb21414..4938f8da02b 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -22,6 +22,7 @@ IF(WITH_WSREP AND NOT EMBEDDED_LIBRARY)
wsrep_server_service.cc
wsrep_storage_service.cc
wsrep_server_state.cc
+ wsrep_event_service.cc
wsrep_status.cc
wsrep_allowlist_service.cc
wsrep_utils.cc
@@ -37,7 +38,8 @@ IF(WITH_WSREP AND NOT EMBEDDED_LIBRARY)
wsrep_schema.cc
wsrep_plugin.cc
service_wsrep.cc
- )
+ )
+ MYSQL_ADD_PLUGIN(wsrep_provider ${WSREP_SOURCES} DEFAULT NOT_EMBEDDED LINK_LIBRARIES wsrep-lib wsrep_api_v26)
MYSQL_ADD_PLUGIN(wsrep ${WSREP_SOURCES} MANDATORY NOT_EMBEDDED LINK_LIBRARIES wsrep-lib wsrep_api_v26)
IF(VISIBILITY_HIDDEN_FLAG AND TARGET wsrep)
# wsrep_info plugin needs some wsrep symbols from inside mysqld
@@ -106,7 +108,6 @@ SET (SQL_SOURCE
key.cc log.cc lock.cc
log_event.cc log_event_server.cc
rpl_record.cc rpl_reporting.cc
- log_event_old.cc rpl_record_old.cc
mf_iocache.cc my_decimal.cc
mysqld.cc net_serv.cc keycaches.cc
../sql-common/client_plugin.c
@@ -174,6 +175,7 @@ SET (SQL_SOURCE
sql_tvc.cc sql_tvc.h
opt_split.cc
rowid_filter.cc rowid_filter.h
+ optimizer_costs.h optimizer_defaults.h
opt_trace.cc
table_cache.cc encryption.cc temporary_tables.cc
json_table.cc
diff --git a/sql/ddl_log.cc b/sql/ddl_log.cc
index 67c8a8bca4f..a7920ade939 100644
--- a/sql/ddl_log.cc
+++ b/sql/ddl_log.cc
@@ -2741,6 +2741,8 @@ int ddl_log_execute_recovery()
thd->thread_stack= (char*) &thd;
thd->store_globals();
thd->init(); // Needed for error messages
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:ddl_log_execute_recovery"),
+ default_charset_info);
thd->log_all_errors= (global_system_variables.log_warnings >= 3);
recovery_state.drop_table.free();
diff --git a/sql/debug.cc b/sql/debug.cc
index a0e2340e254..a24cc4e407e 100644
--- a/sql/debug.cc
+++ b/sql/debug.cc
@@ -35,7 +35,7 @@ static const LEX_CSTRING debug_crash_counter=
static const LEX_CSTRING debug_error_counter=
{ STRING_WITH_LEN("debug_error_counter") };
-static bool debug_decrement_counter(const LEX_CSTRING *name)
+bool debug_decrement_counter(const LEX_CSTRING *name)
{
THD *thd= current_thd;
user_var_entry *entry= (user_var_entry*)
diff --git a/sql/debug.h b/sql/debug.h
index 48bae774625..f0eaa79e3c7 100644
--- a/sql/debug.h
+++ b/sql/debug.h
@@ -31,6 +31,7 @@
#ifndef DBUG_OFF
void debug_crash_here(const char *keyword);
bool debug_simulate_error(const char *keyword, uint error);
+bool debug_decrement_counter(const LEX_CSTRING *name);
#else
#define debug_crash_here(A) do { } while(0)
#define debug_simulate_error(A, B) 0
diff --git a/sql/events.cc b/sql/events.cc
index 6ecdf975178..01a11461655 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -906,6 +906,8 @@ Events::init(THD *thd, bool opt_noacl_or_bootstrap)
*/
thd->thread_stack= (char*) &thd;
thd->store_globals();
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:Events::init"),
+ default_charset_info);
/*
Set current time for the thread that handles events.
Current time is stored in data member start_time of THD class.
diff --git a/sql/filesort.cc b/sql/filesort.cc
index bf5520955c9..96eabfdab89 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -63,10 +63,6 @@ static Addon_fields *get_addon_fields(TABLE *table, uint sortlength,
uint *addon_length,
uint *m_packable_length);
-static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
- TABLE *table,
- ha_rows records, size_t memory_available);
-
static void store_key_part_length(uint32 num, uchar *to, uint bytes)
{
switch(bytes) {
@@ -91,42 +87,62 @@ static uint32 read_keypart_length(const uchar *from, uint bytes)
}
-// @param sortlen [Maximum] length of the sort key
-void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
- ha_rows maxrows, Filesort *filesort)
+/*
+ Initialize Sort_param for doing a filesort
+
+ @param table Table to sort
+ @param filesort Filesort descriptor passed to filesort()
+ @param sortlen [Maximum] length of the sort key
+ @param limit_rows Number of rows to return (may be less than rows to sort)
+*/
+
+void Sort_param::init_for_filesort(TABLE *table, Filesort *filesort,
+ uint sortlen, ha_rows limit_rows_arg)
{
DBUG_ASSERT(addon_fields == NULL);
- sort_length= sortlen;
- ref_length= table->file->ref_length;
- accepted_rows= filesort->accepted_rows;
-
if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
!table->fulltext_searched && !filesort->sort_positions)
{
- /*
- Get the descriptors of all fields whose values are appended
+ /*
+ Get the descriptors of all fields whose values are appended
to sorted fields and get its total length in addon_buf.length
*/
- addon_fields= get_addon_fields(table, sort_length, &addon_length,
+ addon_fields= get_addon_fields(table, sortlen, &addon_length,
&m_packable_length);
}
- if (using_addon_fields())
+ DBUG_ASSERT((using_addon_fields() == 0 || addon_length != 0));
+
+ setup_lengths_and_limit(table, sortlen, addon_length, limit_rows_arg);
+ accepted_rows= filesort->accepted_rows;
+}
+
+
+void Sort_param::setup_lengths_and_limit(TABLE *table,
+ uint sort_len_arg,
+ uint addon_length_arg,
+ ha_rows limit_rows_arg)
+{
+ sort_form= table;
+ sort_length= sort_len_arg;
+ limit_rows= limit_rows_arg;
+ ref_length= table->file->ref_length;
+
+ if (addon_length_arg)
{
- DBUG_ASSERT(addon_length < UINT_MAX32);
- res_length= addon_length;
+ DBUG_ASSERT(addon_length_arg < UINT_MAX32);
+ res_length= addon_length_arg;
}
else
{
res_length= ref_length;
/*
- The reference to the record is considered
- as an additional sorted field
+ The reference (rowid) to the record is considered as an additional
+ sorted field as we want to access rows in rowid order if possible.
*/
sort_length+= ref_length;
}
- rec_length= sort_length + addon_length;
- max_rows= maxrows;
+ rec_length= sort_length + addon_length_arg;
}
@@ -161,6 +177,7 @@ void Sort_param::try_to_pack_addons(ulong max_length_for_sort_data)
rec_length+= sz;
}
+
/**
Sort a table.
Creates a set of pointers that can be used to read the rows
@@ -204,7 +221,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
bool allow_packing_for_sortkeys;
Bounded_queue<uchar, uchar> pq;
SQL_SELECT *const select= filesort->select;
- ha_rows max_rows= filesort->limit;
+ Sort_costs costs;
+ ha_rows limit_rows= filesort->limit;
uint s_length= 0, sort_len;
Sort_keys *sort_keys;
DBUG_ENTER("filesort");
@@ -249,7 +267,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
param.sort_keys= sort_keys;
sort_len= sortlength(thd, sort_keys, &allow_packing_for_sortkeys);
- param.init_for_filesort(sort_len, table, max_rows, filesort);
+ param.init_for_filesort(table, filesort, sort_len, limit_rows);
if (!param.accepted_rows)
param.accepted_rows= &not_used;
@@ -264,21 +282,54 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
else
thd->inc_status_sort_scan();
thd->query_plan_flags|= QPLAN_FILESORT;
- tracker->report_use(thd, max_rows);
+ tracker->report_use(thd, limit_rows);
// If number of rows is not known, use as much of sort buffer as possible.
num_rows= table->file->estimate_rows_upper_bound();
- if (check_if_pq_applicable(&param, sort,
- table, num_rows, memory_available))
+ costs.compute_sort_costs(&param, num_rows, memory_available,
+ param.using_addon_fields());
+
+ if (costs.fastest_sort == NO_SORT_POSSIBLE_OUT_OF_MEM)
{
- DBUG_PRINT("info", ("filesort PQ is applicable"));
+ my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR_LOG + ME_FATAL));
+ goto err;
+ }
+
+ if (costs.fastest_sort == PQ_SORT_ALL_FIELDS ||
+ costs.fastest_sort == PQ_SORT_ORDER_BY_FIELDS)
+ {
+ /* We are going to use a priority queue */
thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE;
status_var_increment(thd->status_var.filesort_pq_sorts_);
tracker->incr_pq_used();
param.using_pq= true;
const size_t compare_length= param.sort_length;
DBUG_ASSERT(param.using_packed_sortkeys() == false);
+
+ if (costs.fastest_sort == PQ_SORT_ORDER_BY_FIELDS && sort->addon_fields)
+ {
+ /*
+ The code above has addon_fields enabled, which we have decided
+ not to use. Let's delete them.
+ */
+ my_free(sort->addon_fields);
+ sort->addon_fields= NULL;
+ param.addon_fields= NULL;
+ param.res_length= param.ref_length;
+ /*
+ Add the ref (rowid which is stored last in the sort key) to the sort,
+ as we want to retrieve rows in rowid order, if possible.
+ */
+ param.sort_length+= param.ref_length;
+ param.rec_length= param.sort_length;
+ }
+
+ /* A priority queue needs one extra element for doing INSERT */
+ param.max_keys_per_buffer= (uint) param.limit_rows + 1;
+ if (!sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length))
+ goto err;
+
/*
For PQ queries (with limit) we know exactly how many pointers/records
we have in the buffer, so to simplify things, we initialize
@@ -286,7 +337,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
point in doing lazy initialization).
*/
sort->init_record_pointers();
- if (pq.init(param.max_rows,
+ if (pq.init(param.limit_rows,
true, // max_at_top
NULL, // compare_function
compare_length,
@@ -312,9 +363,10 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
tracker->report_sort_keys_format(param.using_packed_sortkeys());
param.using_pq= false;
+ /* Allocate sort buffer. Use as much memory as possible. */
size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY,
param.sort_length*MERGEBUFF2);
- set_if_bigger(min_sort_memory, sizeof(Merge_chunk*)*MERGEBUFF2);
+ set_if_bigger(min_sort_memory, sizeof(Merge_chunk*) * MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
@@ -346,18 +398,17 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
if (param.tmp_buffer.alloc(param.sort_length))
goto err;
- if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX,
- DISK_BUFFER_SIZE, MYF(MY_WME)))
+ if (open_cached_file(&buffpek_pointers, mysql_tmpdir, TEMP_PREFIX,
+ DISK_CHUNK_SIZE, MYF(MY_WME)))
goto err;
- param.sort_form= table;
param.local_sortorder=
Bounds_checked_array<SORT_FIELD>(filesort->sortorder, s_length);
num_rows= find_all_keys(thd, &param, select,
sort,
&buffpek_pointers,
- &tempfile,
+ &tempfile,
pq.is_initialized() ? &pq : NULL,
&sort->found_rows);
if (num_rows == HA_POS_ERROR)
@@ -397,10 +448,10 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
sort->buffpek.length= maxbuffer;
buffpek= (Merge_chunk *) sort->buffpek.str;
close_cached_file(&buffpek_pointers);
- /* Open cached file if it isn't open */
- if (! my_b_inited(outfile) &&
- open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
- MYF(MY_WME)))
+ /* Open cached file if it isn't open */
+ if (!my_b_inited(outfile) &&
+ open_cached_file(outfile, mysql_tmpdir, TEMP_PREFIX, DISK_CHUNK_SIZE,
+ MYF(MY_WME)))
goto err;
if (reinit_io_cache(outfile,WRITE_CACHE,0L,0,0))
goto err;
@@ -416,11 +467,11 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
maxbuffer--; // Offset from 0
if (merge_many_buff(&param, sort->get_raw_buf(),
- buffpek,&maxbuffer,
- &tempfile))
+ buffpek, &maxbuffer,
+ &tempfile))
goto err;
if (flush_io_cache(&tempfile) ||
- reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
+ reinit_io_cache(&tempfile, READ_CACHE, 0L, 0, 0))
goto err;
if (merge_index(&param,
sort->get_raw_buf(),
@@ -431,10 +482,10 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
goto err;
}
- if (num_rows > param.max_rows)
+ if (num_rows > param.limit_rows)
{
// If find_all_keys() produced more results than the query LIMIT.
- num_rows= param.max_rows;
+ num_rows= param.limit_rows;
}
error= 0;
@@ -809,7 +860,7 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
{
if (no free space in sort_keys buffers)
{
- sort sort_keys buffer;
+ qsort sort_keys buffer;
dump sorted sequence to 'tempfile';
dump BUFFPEK describing sequence location into 'buffpek_pointers';
}
@@ -836,7 +887,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
ha_rows *found_rows)
{
int error, quick_select;
- uint idx, indexpos;
+ uint num_elements_in_buffer, indexpos;
uchar *ref_pos, *next_pos, ref_buff[MAX_REFLENGTH];
TABLE *sort_form;
handler *file;
@@ -851,15 +902,14 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
(select ? select->quick ? "ranges" : "where":
"every row")));
- idx=indexpos=0;
- error=quick_select=0;
- sort_form=param->sort_form;
- file=sort_form->file;
- ref_pos= ref_buff;
- quick_select=select && select->quick;
+ num_elements_in_buffer= indexpos= 0;
+ error= 0;
*found_rows= 0;
- ref_pos= &file->ref[0];
- next_pos=ref_pos;
+ sort_form= param->sort_form;
+ file= sort_form->file;
+ ref_pos= ref_buff;
+ quick_select= select && select->quick;
+ next_pos= ref_pos= &file->ref[0];
DBUG_EXECUTE_IF("show_explain_in_find_all_keys",
dbug_serve_apcs(thd, 1);
@@ -867,7 +917,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (!quick_select)
{
- next_pos=(uchar*) 0; /* Find records in sequence */
+ next_pos= (uchar*) 0; /* Find records in sequence */
DBUG_EXECUTE_IF("bug14365043_1",
DBUG_SET("+d,ha_rnd_init_fail"););
if (unlikely(file->ha_rnd_init_with_error(1)))
@@ -966,12 +1016,13 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
{
if (fs_info->isfull())
{
- if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
+ if (write_keys(param, fs_info, num_elements_in_buffer,
+ buffpek_pointers, tempfile))
goto err;
- idx= 0;
+ num_elements_in_buffer= 0;
indexpos++;
}
- if (idx == 0)
+ if (num_elements_in_buffer == 0)
fs_info->init_next_record_pointer();
uchar *start_of_rec= fs_info->get_next_record_pointer();
@@ -979,7 +1030,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
ref_pos, using_packed_sortkeys);
if (packed_format && rec_sz != param->rec_length)
fs_info->adjust_next_record_pointer(rec_sz);
- idx++;
+ num_elements_in_buffer++;
}
num_records++;
(*param->accepted_rows)++;
@@ -1015,8 +1066,9 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
file->print_error(error,MYF(ME_ERROR_LOG));
DBUG_RETURN(HA_POS_ERROR);
}
- if (indexpos && idx &&
- write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
+ if (indexpos && num_elements_in_buffer &&
+ write_keys(param, fs_info, num_elements_in_buffer, buffpek_pointers,
+ tempfile))
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
(*found_rows)= num_records;
@@ -1065,7 +1117,7 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
fs_info->sort_buffer(param, count);
if (!my_b_inited(tempfile) &&
- open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE,
+ open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_CHUNK_SIZE,
MYF(MY_WME)))
DBUG_RETURN(1); /* purecov: inspected */
/* check we won't have more buffpeks than we can possibly keep in memory */
@@ -1073,15 +1125,13 @@ write_keys(Sort_param *param, SORT_INFO *fs_info, uint count,
DBUG_RETURN(1);
buffpek.set_file_position(my_b_tell(tempfile));
- if ((ha_rows) count > param->max_rows)
- count=(uint) param->max_rows; /* purecov: inspected */
+ if ((ha_rows) count > param->limit_rows)
+ count=(uint) param->limit_rows; /* purecov: inspected */
buffpek.set_rowcount(static_cast<ha_rows>(count));
for (uint ix= 0; ix < count; ++ix)
{
uchar *record= fs_info->get_sorted_record(ix);
-
-
if (my_b_write(tempfile, record, param->get_record_length(record)))
DBUG_RETURN(1); /* purecov: inspected */
}
@@ -1496,144 +1546,6 @@ static bool save_index(Sort_param *param, uint count,
}
-/**
- Test whether priority queue is worth using to get top elements of an
- ordered result set. If it is, then allocates buffer for required amount of
- records
-
- @param param Sort parameters.
- @param filesort_info Filesort information.
- @param table Table to sort.
- @param num_rows Estimate of number of rows in source record set.
- @param memory_available Memory available for sorting.
-
- DESCRIPTION
- Given a query like this:
- SELECT ... FROM t ORDER BY a1,...,an LIMIT max_rows;
- This function tests whether a priority queue should be used to keep
- the result. Necessary conditions are:
- - estimate that it is actually cheaper than merge-sort
- - enough memory to store the <max_rows> records.
-
- If we don't have space for <max_rows> records, but we *do* have
- space for <max_rows> keys, we may rewrite 'table' to sort with
- references to records instead of additional data.
- (again, based on estimates that it will actually be cheaper).
-
- @retval
- true - if it's ok to use PQ
- false - PQ will be slower than merge-sort, or there is not enough memory.
-*/
-
-static bool check_if_pq_applicable(Sort_param *param,
- SORT_INFO *filesort_info,
- TABLE *table, ha_rows num_rows,
- size_t memory_available)
-{
- DBUG_ENTER("check_if_pq_applicable");
-
- /*
- How much Priority Queue sort is slower than qsort.
- Measurements (see unit test) indicate that PQ is roughly 3 times slower.
- */
- const double PQ_slowness= 3.0;
-
- if (param->max_rows == HA_POS_ERROR)
- {
- DBUG_PRINT("info", ("No LIMIT"));
- DBUG_RETURN(false);
- }
-
- if (param->max_rows + 2 >= UINT_MAX)
- {
- DBUG_PRINT("info", ("Too large LIMIT"));
- DBUG_RETURN(false);
- }
-
- size_t num_available_keys=
- memory_available / (param->rec_length + sizeof(char*));
- // We need 1 extra record in the buffer, when using PQ.
- param->max_keys_per_buffer= (uint) param->max_rows + 1;
-
- if (num_rows < num_available_keys)
- {
- // The whole source set fits into memory.
- if (param->max_rows < num_rows/PQ_slowness )
- {
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length);
- DBUG_RETURN(filesort_info->sort_buffer_size() != 0);
- }
- else
- {
- // PQ will be slower.
- DBUG_RETURN(false);
- }
- }
-
- // Do we have space for LIMIT rows in memory?
- if (param->max_keys_per_buffer < num_available_keys)
- {
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->rec_length);
- DBUG_RETURN(filesort_info->sort_buffer_size() != 0);
- }
-
- // Try to strip off addon fields.
- if (param->addon_fields)
- {
- const size_t row_length=
- param->sort_length + param->ref_length + sizeof(char*);
- num_available_keys= memory_available / row_length;
-
- // Can we fit all the keys in memory?
- if (param->max_keys_per_buffer < num_available_keys)
- {
- const double sort_merge_cost=
- get_merge_many_buffs_cost_fast(num_rows,
- num_available_keys,
- (uint)row_length);
- /*
- PQ has cost:
- (insert + qsort) * log(queue size) / TIME_FOR_COMPARE_ROWID +
- cost of file lookup afterwards.
- The lookup cost is a bit pessimistic: we take scan_time and assume
- that on average we find the row after scanning half of the file.
- A better estimate would be lookup cost, but note that we are doing
- random lookups here, rather than sequential scan.
- */
- const double pq_cpu_cost=
- (PQ_slowness * num_rows + param->max_keys_per_buffer) *
- log((double) param->max_keys_per_buffer) / TIME_FOR_COMPARE_ROWID;
- const double pq_io_cost=
- param->max_rows * table->file->scan_time() / 2.0;
- const double pq_cost= pq_cpu_cost + pq_io_cost;
-
- if (sort_merge_cost < pq_cost)
- DBUG_RETURN(false);
-
- filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
- param->sort_length + param->ref_length);
-
- if (filesort_info->sort_buffer_size() > 0)
- {
- /* Make attached data to be references instead of fields. */
- my_free(filesort_info->addon_fields);
- filesort_info->addon_fields= NULL;
- param->addon_fields= NULL;
-
- param->res_length= param->ref_length;
- param->sort_length+= param->ref_length;
- param->rec_length= param->sort_length;
-
- DBUG_RETURN(true);
- }
- }
- }
- DBUG_RETURN(false);
-}
-
-
/** Merge buffers to make < MERGEBUFF2 buffers. */
int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer,
@@ -1645,28 +1557,28 @@ int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer,
DBUG_ENTER("merge_many_buff");
if (*maxbuffer < MERGEBUFF2)
- DBUG_RETURN(0); /* purecov: inspected */
+ DBUG_RETURN(0); /* purecov: inspected */
if (flush_io_cache(t_file) ||
- open_cached_file(&t_file2,mysql_tmpdir,TEMP_PREFIX,DISK_BUFFER_SIZE,
- MYF(MY_WME)))
+ open_cached_file(&t_file2, mysql_tmpdir, TEMP_PREFIX, DISK_CHUNK_SIZE,
+ MYF(MY_WME)))
DBUG_RETURN(1); /* purecov: inspected */
- from_file= t_file ; to_file= &t_file2;
+ from_file= t_file; to_file= &t_file2;
while (*maxbuffer >= MERGEBUFF2)
{
- if (reinit_io_cache(from_file,READ_CACHE,0L,0,0))
+ if (reinit_io_cache(from_file, READ_CACHE, 0L, 0, 0))
goto cleanup;
- if (reinit_io_cache(to_file,WRITE_CACHE,0L,0,0))
+ if (reinit_io_cache(to_file, WRITE_CACHE, 0L, 0, 0))
goto cleanup;
lastbuff=buffpek;
- for (i=0 ; i <= *maxbuffer-MERGEBUFF*3/2 ; i+=MERGEBUFF)
+ for (i= 0; i <= *maxbuffer - MERGEBUFF * 3 / 2 ; i+= MERGEBUFF)
{
- if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++,
- buffpek+i,buffpek+i+MERGEBUFF-1,0))
+ if (merge_buffers(param, from_file, to_file, sort_buffer, lastbuff++,
+ buffpek + i, buffpek + i + MERGEBUFF - 1, 0))
goto cleanup;
}
- if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++,
- buffpek+i,buffpek+ *maxbuffer,0))
+ if (merge_buffers(param, from_file, to_file, sort_buffer, lastbuff++,
+ buffpek + i, buffpek + *maxbuffer, 0))
break; /* purecov: inspected */
if (flush_io_cache(to_file))
break; /* purecov: inspected */
@@ -1764,7 +1676,7 @@ ulong read_to_buffer(IO_CACHE *fromfile, Merge_chunk *buffpek,
num_bytes_read= bytes_to_read;
buffpek->init_current_key();
- buffpek->advance_file_position(num_bytes_read); /* New filepos */
+ buffpek->advance_file_position(num_bytes_read); /* New filepos */
buffpek->decrement_rowcount(count);
buffpek->set_mem_count(count);
return (ulong) num_bytes_read;
@@ -1863,7 +1775,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
maxcount= (ulong) (param->max_keys_per_buffer/((uint) (Tb-Fb) +1));
to_start_filepos= my_b_tell(to_file);
strpos= sort_buffer.array();
- org_max_rows=max_rows= param->max_rows;
+ org_max_rows= max_rows= param->limit_rows;
set_if_bigger(maxcount, 1);
@@ -1878,17 +1790,17 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
first_cmp_arg= param->get_compare_argument(&sort_length);
}
if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1,
- offsetof(Merge_chunk,m_current_key), 0,
+ offsetof(Merge_chunk,m_current_key), 0,
(queue_compare) cmp, first_cmp_arg, 0, 0)))
DBUG_RETURN(1); /* purecov: inspected */
- const size_t chunk_sz = (sort_buffer.size()/((uint) (Tb-Fb) +1));
- for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
+ const size_t chunk_sz= (sort_buffer.size()/((uint) (Tb-Fb) +1));
+ for (buffpek= Fb; buffpek <= Tb; buffpek++)
{
buffpek->set_buffer(strpos, strpos + chunk_sz);
buffpek->set_max_keys(maxcount);
bytes_read= read_to_buffer(from_file, buffpek, param, packed_format);
if (unlikely(bytes_read == (ulong) -1))
- goto err; /* purecov: inspected */
+ goto err; /* purecov: inspected */
strpos+= chunk_sz;
// If less data in buffers than expected
buffpek->set_max_keys(buffpek->mem_count());
@@ -1970,11 +1882,11 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
*/
if (!check_dupl_count || dupl_count >= min_dupl_count)
{
- if(my_b_write(to_file,
- src + (offset_for_packing ?
- rec_length - res_length : // sort length
- wr_offset),
- bytes_to_write))
+ if (my_b_write(to_file,
+ src + (offset_for_packing ?
+ rec_length - res_length : // sort length
+ wr_offset),
+ bytes_to_write))
goto err; /* purecov: inspected */
}
if (cmp)
@@ -2084,7 +1996,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
bytes_read != 0);
end:
- lastbuff->set_rowcount(MY_MIN(org_max_rows-max_rows, param->max_rows));
+ lastbuff->set_rowcount(MY_MIN(org_max_rows - max_rows, param->limit_rows));
lastbuff->set_file_position(to_start_filepos);
cleanup:
@@ -2171,8 +2083,8 @@ Type_handler_timestamp_common::sort_length(THD *thd,
void
Type_handler_int_result::sort_length(THD *thd,
- const Type_std_attributes *item,
- SORT_FIELD_ATTR *sortorder) const
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
{
sortorder->original_length= sortorder->length= 8; // Sizof intern longlong
}
@@ -2180,8 +2092,8 @@ Type_handler_int_result::sort_length(THD *thd,
void
Type_handler_real_result::sort_length(THD *thd,
- const Type_std_attributes *item,
- SORT_FIELD_ATTR *sortorder) const
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
{
sortorder->original_length= sortorder->length= sizeof(double);
}
@@ -2205,13 +2117,13 @@ Type_handler_decimal_result::sort_length(THD *thd,
@param thd Thread handler
@param sortorder Order of items to sort
@param s_length Number of items to sort
- @param allow_packing_for_sortkeys [out] set to false if packing sort keys is not
- allowed
+ @param allow_packing_for_sortkeys [out] set to false if packing sort keys
+ is not allowed
@note
- * sortorder->length and other members are updated for each sort item.
- * TODO what is the meaning of this value if some fields are using packing while
- others are not?
+ - sortorder->length and other members are updated for each sort item.
+ - TODO what is the meaning of this value if some fields are using packing
+ while others are not?
@return
Total length of sort buffer in bytes
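The removed check_if_pq_applicable() heuristics are replaced by the Sort_costs object used earlier in this file: each candidate strategy gets a cost (DBL_MAX when it does not fit in the available sort memory) and the cheapest one is picked, with NO_SORT_POSSIBLE_OUT_OF_MEM reported when nothing fits. A self-contained toy model of that selection step, with invented cost numbers:

#include <cfloat>
#include <cstdio>

enum sort_type
{
  PQ_SORT_ALL_FIELDS= 0,
  PQ_SORT_ORDER_BY_FIELDS,
  MERGE_SORT_ALL_FIELDS,
  MERGE_SORT_ORDER_BY_FIELDS,
  NO_SORT_POSSIBLE_OUT_OF_MEM,           /* sentinel: nothing fits in memory */
  FINAL_SORT_TYPE= NO_SORT_POSSIBLE_OUT_OF_MEM
};

int main()
{
  /* Made-up costs; DBL_MAX marks a strategy that ran out of memory. */
  double costs[FINAL_SORT_TYPE]= { DBL_MAX, 12.5, 40.0, 18.0 };

  double lowest_cost= DBL_MAX;
  unsigned fastest= NO_SORT_POSSIBLE_OUT_OF_MEM;
  for (unsigned i= 0; i < FINAL_SORT_TYPE; i++)
  {
    if (costs[i] < lowest_cost)
    {
      fastest= i;
      lowest_cost= costs[i];
    }
  }
  printf("fastest strategy: %u  cost: %g\n", fastest, lowest_cost);
  return 0;
}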
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index 5a51300a0fa..e1cd7c566bb 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -19,23 +19,106 @@
#include "sql_const.h"
#include "sql_sort.h"
#include "table.h"
-
+#include "optimizer_defaults.h"
PSI_memory_key key_memory_Filesort_buffer_sort_keys;
-namespace {
+const LEX_CSTRING filesort_names[]=
+{
+ { STRING_WITH_LEN("priority_queue with addon fields")},
+ { STRING_WITH_LEN("priority_queue with row lookup")},
+ { STRING_WITH_LEN("merge_sort with addon fields")},
+ { STRING_WITH_LEN("merge_sort with row lookup)")},
+ { STRING_WITH_LEN("Error while computing filesort cost")}
+};
+
+/*
+ Different ways to do sorting:
+ Merge Sort -> Without addon Fields, with fixed length
+ Merge Sort -> Without addon Fields, with dynamic length
+ Merge Sort -> With addon Fields, with fixed length
+ Merge Sort -> With addon Fields, with dynamic length
+
+ Priority queue -> Without addon fields
+ Priority queue -> With addon fields
+
+ With PQ (Priority queue) we could have a simple key (memcmp) or a
+ complex key (double & varchar for example). This cost difference
+ is currently not considered.
+*/
+
+
/**
- A local helper function. See comments for get_merge_buffers_cost().
- */
-double get_merge_cost(ha_rows num_elements, ha_rows num_buffers, uint elem_size)
+ Compute the cost of running qsort over a set of rows.
+ @param num_rows How many rows will be sorted.
+ @param with_addon_fields True if the sorted rows include the whole
+ row (addon fields); false if only the sort keys are stored.
+
+ @retval
+ Cost of the operation.
+*/
+
+double get_qsort_sort_cost(ha_rows num_rows, bool with_addon_fields)
{
- return
- 2.0 * ((double) num_elements * elem_size) / IO_SIZE
- + (double) num_elements * log((double) num_buffers) /
- (TIME_FOR_COMPARE_ROWID * M_LN2);
+ const double row_copy_cost= with_addon_fields ? DEFAULT_ROW_COPY_COST :
+ DEFAULT_KEY_COPY_COST;
+ const double key_cmp_cost= DEFAULT_KEY_COMPARE_COST;
+ const double qsort_constant_factor= QSORT_SORT_SLOWNESS_CORRECTION_FACTOR *
+ (row_copy_cost + key_cmp_cost);
+
+ return qsort_constant_factor * num_rows * log2(1.0 + num_rows);
}
+
+
+/**
+ Compute the cost of sorting num_rows and only retrieving queue_size rows.
+ @param num_rows How many rows will be sorted.
+ @param queue_size How many rows will be returned by the priority
+ queue.
+ @param with_addon_fields True if the sorted rows include the whole
+ row (addon fields); false if only the sort keys are stored.
+
+ @retval
+ Cost of the operation.
+*/
+
+double get_pq_sort_cost(size_t num_rows, size_t queue_size,
+ bool with_addon_fields)
+{
+ const double row_copy_cost= with_addon_fields ? DEFAULT_ROW_COPY_COST :
+ DEFAULT_KEY_COPY_COST;
+ const double key_cmp_cost= DEFAULT_KEY_COMPARE_COST;
+ /* 2 -> 1 insert, 1 pop from the queue*/
+ const double pq_sort_constant_factor= PQ_SORT_SLOWNESS_CORRECTION_FACTOR *
+ 2.0 * (row_copy_cost + key_cmp_cost);
+
+ return pq_sort_constant_factor * num_rows * log2(1.0 + queue_size);
+}
+
+
+/**
+ Compute the cost of merging "num_buffers" sorted buffers using a priority
+ queue.
+
+ See comments for get_merge_buffers_cost().
+*/
+
+static
+double get_merge_cost(ha_rows num_elements, ha_rows num_buffers,
+ size_t elem_size, double compare_cost,
+ double disk_read_cost)
+{
+ /* 2 -> 1 read + 1 write */
+ const double io_cost= (2.0 * (num_elements * elem_size +
+ DISK_CHUNK_SIZE - 1) /
+ DISK_CHUNK_SIZE) * disk_read_cost;
+ /* 2 -> 1 insert, 1 pop for the priority queue used to merge the buffers. */
+ const double cpu_cost= (2.0 * num_elements * log2(1.0 + num_buffers) *
+ compare_cost) * PQ_SORT_SLOWNESS_CORRECTION_FACTOR;
+ return io_cost + cpu_cost;
}
+
/**
This is a simplified, and faster version of @see get_merge_many_buffs_cost().
We calculate the cost of merging buffers, by simulating the actions
@@ -43,40 +126,52 @@ double get_merge_cost(ha_rows num_elements, ha_rows num_buffers, uint elem_size)
see comments for get_merge_buffers_cost().
TODO: Use this function for Unique::get_use_cost().
*/
+
double get_merge_many_buffs_cost_fast(ha_rows num_rows,
ha_rows num_keys_per_buffer,
- uint elem_size)
+ size_t elem_size,
+ double key_compare_cost,
+ double disk_read_cost,
+ bool with_addon_fields)
{
+ DBUG_ASSERT(num_keys_per_buffer != 0);
+
ha_rows num_buffers= num_rows / num_keys_per_buffer;
ha_rows last_n_elems= num_rows % num_keys_per_buffer;
double total_cost;
+ double full_buffer_sort_cost;
- // Calculate CPU cost of sorting buffers.
- total_cost=
- ((num_buffers * num_keys_per_buffer * log(1.0 + num_keys_per_buffer) +
- last_n_elems * log(1.0 + last_n_elems)) /
- TIME_FOR_COMPARE_ROWID);
-
- // Simulate behavior of merge_many_buff().
+ /* Calculate cost for sorting all merge buffers + the last one. */
+ full_buffer_sort_cost= get_qsort_sort_cost(num_keys_per_buffer,
+ with_addon_fields);
+ total_cost= (num_buffers * full_buffer_sort_cost +
+ get_qsort_sort_cost(last_n_elems, with_addon_fields));
+
+ if (num_buffers >= MERGEBUFF2)
+ total_cost+= TMPFILE_CREATE_COST * 2; // We are creating 2 files.
+
+ /* Simulate behavior of merge_many_buff(). */
while (num_buffers >= MERGEBUFF2)
{
- // Calculate # of calls to merge_buffers().
- const ha_rows loop_limit= num_buffers - MERGEBUFF*3/2;
- const ha_rows num_merge_calls= 1 + loop_limit/MERGEBUFF;
+ /* Calculate # of calls to merge_buffers(). */
+ const ha_rows loop_limit= num_buffers - MERGEBUFF * 3 / 2;
+ const ha_rows num_merge_calls= 1 + loop_limit / MERGEBUFF;
const ha_rows num_remaining_buffs=
num_buffers - num_merge_calls * MERGEBUFF;
- // Cost of merge sort 'num_merge_calls'.
+ /* Cost of merge sort 'num_merge_calls'. */
total_cost+=
num_merge_calls *
- get_merge_cost(num_keys_per_buffer * MERGEBUFF, MERGEBUFF, elem_size);
+ get_merge_cost(num_keys_per_buffer * MERGEBUFF, MERGEBUFF, elem_size,
+ key_compare_cost, disk_read_cost);
// # of records in remaining buffers.
last_n_elems+= num_remaining_buffs * num_keys_per_buffer;
// Cost of merge sort of remaining buffers.
total_cost+=
- get_merge_cost(last_n_elems, 1 + num_remaining_buffs, elem_size);
+ get_merge_cost(last_n_elems, 1 + num_remaining_buffs, elem_size,
+ key_compare_cost, disk_read_cost);
num_buffers= num_merge_calls;
num_keys_per_buffer*= MERGEBUFF;
@@ -84,10 +179,139 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
// Simulate final merge_buff call.
last_n_elems+= num_keys_per_buffer * num_buffers;
- total_cost+= get_merge_cost(last_n_elems, 1 + num_buffers, elem_size);
+ total_cost+= get_merge_cost(last_n_elems, 1 + num_buffers, elem_size,
+ key_compare_cost, disk_read_cost);
return total_cost;
}
+
+void Sort_costs::compute_fastest_sort()
+{
+ lowest_cost= DBL_MAX;
+ uint min_idx= NO_SORT_POSSIBLE_OUT_OF_MEM;
+ for (uint i= 0; i < FINAL_SORT_TYPE; i++)
+ {
+ if (lowest_cost > costs[i])
+ {
+ min_idx= i;
+ lowest_cost= costs[i];
+ }
+ }
+ fastest_sort= static_cast<enum sort_type>(min_idx);
+}
+
+
+/*
+ Calculate cost of using priority queue for filesort.
+ There are two options: using addon fields or not
+*/
+
+void Sort_costs::compute_pq_sort_costs(Sort_param *param, ha_rows num_rows,
+ size_t memory_available,
+ bool with_addon_fields)
+{
+ /*
+ Implementation detail of PQ. To be able to keep a PQ of size N we need
+ N+1 elements allocated so we can use the last element as "swap" space
+ for the "insert" operation.
+ TODO(cvicentiu): This should be left as an implementation detail inside
+ the PQ, not have the optimizer take it into account.
+ */
+ size_t queue_size= param->limit_rows + 1;
+ size_t row_length, num_available_keys;
+
+ costs[PQ_SORT_ALL_FIELDS]= DBL_MAX;
+ costs[PQ_SORT_ORDER_BY_FIELDS]= DBL_MAX;
+
+ /*
+ We can't use priority queue if there's no limit or the limit is
+ too big.
+ */
+ if (param->limit_rows == HA_POS_ERROR ||
+ param->limit_rows >= UINT_MAX - 2)
+ return;
+
+ /* Calculate cost without addon keys (probably using less memory) */
+ row_length= param->sort_length + param->ref_length + sizeof(char*);
+ num_available_keys= memory_available / row_length;
+
+ if (queue_size < num_available_keys)
+ {
+ handler *file= param->sort_form->file;
+ costs[PQ_SORT_ORDER_BY_FIELDS]=
+ get_pq_sort_cost(num_rows, queue_size, false) +
+ file->cost(file->ha_rnd_pos_call_time(MY_MIN(queue_size - 1, num_rows)));
+ }
+
+ /* Calculate cost with addon fields */
+ if (with_addon_fields)
+ {
+ row_length= param->rec_length + sizeof(char *);
+ num_available_keys= memory_available / row_length;
+
+ if (queue_size < num_available_keys)
+ costs[PQ_SORT_ALL_FIELDS]= get_pq_sort_cost(num_rows, queue_size, true);
+ }
+}
+
+/*
+ Calculate the cost of using qsort plus an optional merge sort for resolving the filesort.
+ There are two options: using addon fields or not
+*/
+
+void Sort_costs::compute_merge_sort_costs(Sort_param *param,
+ ha_rows num_rows,
+ size_t memory_available,
+ bool with_addon_fields)
+{
+ size_t row_length= param->sort_length + param->ref_length + sizeof(char *);
+ size_t num_available_keys= memory_available / row_length;
+
+ costs[MERGE_SORT_ALL_FIELDS]= DBL_MAX;
+ costs[MERGE_SORT_ORDER_BY_FIELDS]= DBL_MAX;
+
+ if (num_available_keys)
+ {
+ handler *file= param->sort_form->file;
+ costs[MERGE_SORT_ORDER_BY_FIELDS]=
+ get_merge_many_buffs_cost_fast(num_rows, num_available_keys,
+ row_length, DEFAULT_KEY_COMPARE_COST,
+ default_optimizer_costs.disk_read_cost,
+ false) +
+ file->cost(file->ha_rnd_pos_call_time(MY_MIN(param->limit_rows, num_rows)));
+ }
+ if (with_addon_fields)
+ {
+ /* Compute cost of merge sort *if* we strip addon fields. */
+ row_length= param->rec_length + sizeof(char *);
+ num_available_keys= memory_available / row_length;
+
+ if (num_available_keys)
+ costs[MERGE_SORT_ALL_FIELDS]=
+ get_merge_many_buffs_cost_fast(num_rows, num_available_keys,
+ row_length, DEFAULT_KEY_COMPARE_COST,
+ default_optimizer_costs.disk_read_cost,
+ true);
+ }
+
+ /*
+ TODO(cvicentiu) we do not handle dynamic length fields yet.
+ The code should decide here if the format is FIXED length or DYNAMIC
+ and fill in the appropriate costs.
+ */
+}
+
+void Sort_costs::compute_sort_costs(Sort_param *param, ha_rows num_rows,
+ size_t memory_available,
+ bool with_addon_fields)
+{
+ compute_pq_sort_costs(param, num_rows, memory_available,
+ with_addon_fields);
+ compute_merge_sort_costs(param, num_rows, memory_available,
+ with_addon_fields);
+ compute_fastest_sort();
+}
+
/*
alloc_sort_buffer()
@@ -173,7 +397,7 @@ void Filesort_buffer::sort_buffer(const Sort_param *param, uint count)
uchar **buffer= NULL;
if (!param->using_packed_sortkeys() &&
- radixsort_is_appliccable(count, param->sort_length) &&
+ radixsort_is_applicable(count, param->sort_length) &&
(buffer= (uchar**) my_malloc(PSI_INSTRUMENT_ME, count*sizeof(char*),
MYF(MY_THREAD_SPECIFIC))))
{
@@ -186,3 +410,66 @@ void Filesort_buffer::sort_buffer(const Sort_param *param, uint count)
param->get_compare_function(),
param->get_compare_argument(&size));
}
+
+
+static
+size_t get_sort_length(THD *thd, Item_field *item)
+{
+ SORT_FIELD_ATTR sort_attr;
+ sort_attr.type= ((item->field)->is_packable() ?
+ SORT_FIELD_ATTR::VARIABLE_SIZE :
+ SORT_FIELD_ATTR::FIXED_SIZE);
+ item->type_handler()->sort_length(thd, item, &sort_attr);
+
+ return sort_attr.length + (item->maybe_null() ? 1 : 0);
+}
+
+
+/**
+ Calculate the cost of doing a filesort
+
+ @param table Table to sort
+ @param order_by Fields to sort
+ @param rows_to_read Number of rows to be sorted
+ @param limit_rows Number of rows in result (when using limit)
+ @param used_sort_type Set to the sort algorithm used
+
+ @result cost of sorting
+*/
+
+
+double cost_of_filesort(TABLE *table, ORDER *order_by, ha_rows rows_to_read,
+ ha_rows limit_rows, enum sort_type *used_sort_type)
+{
+ THD *thd= table->in_use;
+ Sort_costs costs;
+ Sort_param param;
+ size_t memory_available= (size_t) thd->variables.sortbuff_size;
+ uint sort_len= 0;
+ uint addon_field_length, num_addon_fields, num_nullable_fields;
+ uint packable_length;
+ bool with_addon_fields;
+
+ for (ORDER *ptr= order_by; ptr ; ptr= ptr->next)
+ {
+ Item_field *field= (Item_field*) (*ptr->item)->real_item();
+ size_t length= get_sort_length(thd, field);
+ set_if_smaller(length, thd->variables.max_sort_length);
+ sort_len+= (uint) length;
+ }
+
+ with_addon_fields=
+ filesort_use_addons(table, sort_len, &addon_field_length,
+ &num_addon_fields, &num_nullable_fields,
+ &packable_length);
+
+ /* Fill in the Sort_param structure so we can compute the sort costs */
+ param.setup_lengths_and_limit(table, sort_len, addon_field_length,
+ limit_rows);
+
+ costs.compute_sort_costs(&param, rows_to_read, memory_available,
+ with_addon_fields);
+
+ *used_sort_type= costs.fastest_sort;
+ return costs.lowest_cost;
+}
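Both CPU cost formulas above boil down to cost-per-operation × n × log2(queue or buffer size); the point of the priority queue is that its log factor depends only on the LIMIT, at the price of roughly two heap operations per row plus the later row lookups and, for merge sort, the disk chunk IO. A stand-alone rendering of the two formulas for one concrete case; the cost constants and correction factors below are placeholders, not the values from optimizer_defaults.h:

#include <cmath>
#include <cstdio>

/* Placeholder constants, assumed for illustration only. */
static const double ROW_COPY_COST= 0.000060;
static const double KEY_COPY_COST= 0.000015;
static const double KEY_CMP_COST=  0.000011;
static const double QSORT_FACTOR=  0.1;   /* stands in for the qsort correction */
static const double PQ_FACTOR=     0.1;   /* stands in for the PQ correction    */

static double qsort_cost(double num_rows, bool with_addon_fields)
{
  double copy= with_addon_fields ? ROW_COPY_COST : KEY_COPY_COST;
  return QSORT_FACTOR * (copy + KEY_CMP_COST) * num_rows * log2(1.0 + num_rows);
}

static double pq_cost(double num_rows, double queue_size, bool with_addon_fields)
{
  double copy= with_addon_fields ? ROW_COPY_COST : KEY_COPY_COST;
  /* 2 -> one insert and one pop per row, as in get_pq_sort_cost() */
  return PQ_FACTOR * 2.0 * (copy + KEY_CMP_COST) * num_rows *
         log2(1.0 + queue_size);
}

int main()
{
  /* ORDER BY ... LIMIT 100 over one million rows, keys only (no addon fields) */
  printf("qsort, all rows sorted : %.2f\n", qsort_cost(1e6, false));
  printf("pq, queue of 101 rows  : %.2f\n", pq_cost(1e6, 101, false));
  return 0;
}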
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
index 946b1cb4f06..776e986e14a 100644
--- a/sql/filesort_utils.h
+++ b/sql/filesort_utils.h
@@ -16,16 +16,20 @@
#ifndef FILESORT_UTILS_INCLUDED
#define FILESORT_UTILS_INCLUDED
+#include "my_global.h"
#include "my_base.h"
#include "sql_array.h"
+#include "handler.h"
class Sort_param;
-/*
+
+/**
Calculate cost of merge sort
@param num_rows Total number of rows.
@param num_keys_per_buffer Number of keys per buffer.
@param elem_size Size of each element.
+ @param key_compare_cost Cost to compare two keys during QSort & merge
Calculates cost of merge sort by simulating call to merge_many_buff().
@@ -38,19 +42,82 @@ class Sort_param;
See also comments get_merge_many_buffs_cost().
*/
-
double get_merge_many_buffs_cost_fast(ha_rows num_rows,
ha_rows num_keys_per_buffer,
- uint elem_size);
+ size_t elem_size,
+ double key_compare_cost,
+ double disk_read_cost,
+ bool with_addon_fields);
+
/**
+ These are the current sorting algorithms we compute cost for:
+
+ PQ_SORT_ALL_FIELDS Sort via priority queue, with addon fields.
+ PQ_SORT_ORDER_BY_FIELDS Sort via priority queue, without addon fields.
+
+ MERGE_SORT_ALL_FIELDS Sort via merge sort, with addon fields.
+ MERGE_SORT_ORDER_BY_FIELDS Sort via merge sort, without addon fields.
+
+ Note:
+ There is the possibility to do merge-sorting with dynamic length fields.
+ This is more expensive than if there are only fixed length fields,
+ however we do not (yet) account for that extra cost. We can extend the
+ cost computation in the future to cover that case as well.
+
+ Effectively there are 4 possible combinations for merge sort:
+ With/without addon fields
+ With/without dynamic length fields.
+*/
+
+enum sort_type
+{
+ PQ_SORT_ALL_FIELDS= 0,
+ PQ_SORT_ORDER_BY_FIELDS,
+ MERGE_SORT_ALL_FIELDS,
+ MERGE_SORT_ORDER_BY_FIELDS,
+
+ NO_SORT_POSSIBLE_OUT_OF_MEM, /* In case of errors */
+ FINAL_SORT_TYPE= NO_SORT_POSSIBLE_OUT_OF_MEM
+};
+
+struct Sort_costs
+{
+ Sort_costs() :
+ fastest_sort(NO_SORT_POSSIBLE_OUT_OF_MEM), lowest_cost(DBL_MAX) {}
+
+ void compute_sort_costs(Sort_param *param, ha_rows num_rows,
+ size_t memory_available,
+ bool with_addon_fields);
+
+ /* Cache value for fastest_sort. */
+ enum sort_type fastest_sort;
+ /* Cache value for lowest cost. */
+ double lowest_cost;
+private:
+ /*
+ Array to hold all computed costs.
+ TODO(cvicentiu) This array is only useful for debugging. If it's not
+ used in debugging code, it can be removed to reduce memory usage.
+ */
+ double costs[FINAL_SORT_TYPE];
+
+ void compute_pq_sort_costs(Sort_param *param, ha_rows num_rows,
+ size_t memory_available,
+ bool with_addon_fields);
+ void compute_merge_sort_costs(Sort_param *param, ha_rows num_rows,
+ size_t memory_available,
+ bool with_addon_fields);
+ void compute_fastest_sort();
+};
+
+/**
A wrapper class around the buffer used by filesort().
The sort buffer is a contiguous chunk of memory,
containing both records to be sorted, and pointers to said records:
- <start of buffer | still unused | end of buffer>
- |rec 0|record 1 |rec 2| ............ |ptr to rec2|ptr to rec1|ptr to rec0|
+ <start of buffer | still unused | end of buffer>
+ | rec0 | rec1 | rec2 | ............ |ptr to rec2|ptr to rec1|ptr to rec0|
Records will be inserted "left-to-right". Records are not necessarily
fixed-size, they can be packed and stored without any "gaps".
@@ -268,8 +335,14 @@ private:
longlong m_idx;
};
+/* Names for sort_type */
+extern const LEX_CSTRING filesort_names[];
+
+double cost_of_filesort(TABLE *table, ORDER *order_by, ha_rows rows_to_read,
+ ha_rows limit_rows, enum sort_type *used_sort_type);
+
+double get_qsort_sort_cost(ha_rows num_rows, bool with_addon_fields);
int compare_packed_sort_keys(void *sort_keys, unsigned char **a,
unsigned char **b);
qsort2_cmp get_packed_keys_compare_ptr();
-
#endif // FILESORT_UTILS_INCLUDED
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index c5c683b6666..5948d657305 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -6575,7 +6575,7 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno,
RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *mrr_mode,
+ uint *mrr_mode, ha_rows limit,
Cost_estimate *cost)
{
int error;
@@ -6629,14 +6629,14 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno,
ha_rows tmp_rows;
uint tmp_mrr_mode;
m_mrr_buffer_size[i]= 0;
- part_cost.reset();
+ part_cost.reset(*file);
tmp_mrr_mode= *mrr_mode;
tmp_rows= (*file)->
multi_range_read_info_const(keyno, &m_part_seq_if,
&m_partition_part_key_multi_range_hld[i],
m_part_mrr_range_length[i],
&m_mrr_buffer_size[i],
- &tmp_mrr_mode, &part_cost);
+ &tmp_mrr_mode, limit, &part_cost);
if (tmp_rows == HA_POS_ERROR)
{
m_part_spec= save_part_spec;
@@ -6680,7 +6680,7 @@ ha_rows ha_partition::multi_range_read_info(uint keyno, uint n_ranges,
{
ha_rows tmp_rows;
m_mrr_buffer_size[i]= 0;
- part_cost.reset();
+ part_cost.reset(*file);
if ((tmp_rows= (*file)->multi_range_read_info(keyno, n_ranges, keys,
key_parts,
&m_mrr_buffer_size[i],
@@ -9737,16 +9737,28 @@ uint ha_partition::get_biggest_used_partition(uint *part_index)
time for scan
*/
-double ha_partition::scan_time()
+IO_AND_CPU_COST ha_partition::scan_time()
{
- double scan_time= 0;
+ IO_AND_CPU_COST scan_time= {0,0};
uint i;
DBUG_ENTER("ha_partition::scan_time");
for (i= bitmap_get_first_set(&m_part_info->read_partitions);
i < m_tot_parts;
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
- scan_time+= m_file[i]->scan_time();
+ {
+ IO_AND_CPU_COST cost= m_file[i]->scan_time();
+ scan_time.io+= cost.io;
+ scan_time.cpu+= cost.cpu;
+ }
+ if (m_tot_parts)
+ {
+ /*
+ Add TABLE_SCAN_SETUP_COST for each extra partition to keep the cost
+ comparable to ha_scan_time()
+ */
+ scan_time.cpu+= TABLE_SCAN_SETUP_COST * (m_tot_parts - 1);
+ }
DBUG_RETURN(scan_time);
}
@@ -9760,34 +9772,78 @@ double ha_partition::scan_time()
@return time for scanning index inx
*/
-double ha_partition::key_scan_time(uint inx)
+IO_AND_CPU_COST ha_partition::key_scan_time(uint inx, ha_rows rows)
{
- double scan_time= 0;
+ IO_AND_CPU_COST scan_time= {0,0};
uint i;
+ uint partitions= bitmap_bits_set(&m_part_info->read_partitions);
+ ha_rows rows_per_part;
DBUG_ENTER("ha_partition::key_scan_time");
+
+ if (partitions == 0)
+ DBUG_RETURN(scan_time);
+ set_if_bigger(rows, 1);
+ rows_per_part= (rows + partitions - 1)/partitions;
+
for (i= bitmap_get_first_set(&m_part_info->read_partitions);
i < m_tot_parts;
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
- scan_time+= m_file[i]->key_scan_time(inx);
+ {
+ IO_AND_CPU_COST cost= m_file[i]->key_scan_time(inx, rows_per_part);
+ scan_time.io+= cost.io;
+ scan_time.cpu+= cost.cpu;
+ }
DBUG_RETURN(scan_time);
}
-double ha_partition::keyread_time(uint inx, uint ranges, ha_rows rows)
+IO_AND_CPU_COST ha_partition::keyread_time(uint inx, ulong ranges, ha_rows rows,
+ ulonglong blocks)
{
- double read_time= 0;
+ IO_AND_CPU_COST read_time= {0,0};
uint i;
+ uint partitions= bitmap_bits_set(&m_part_info->read_partitions);
DBUG_ENTER("ha_partition::keyread_time");
- if (!ranges)
- DBUG_RETURN(handler::keyread_time(inx, ranges, rows));
+ if (partitions == 0)
+ DBUG_RETURN(read_time);
+
+ ha_rows rows_per_part= (rows + partitions - 1)/partitions;
for (i= bitmap_get_first_set(&m_part_info->read_partitions);
i < m_tot_parts;
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
- read_time+= m_file[i]->keyread_time(inx, ranges, rows);
+ {
+ IO_AND_CPU_COST cost= m_file[i]->keyread_time(inx, ranges, rows_per_part,
+ blocks);
+ read_time.io+= cost.io;
+ read_time.cpu+= cost.cpu;
+ }
+ /* Add a key lookup for all ranges in all but the first partition */
+ read_time.cpu+= (partitions-1) * ranges * KEY_LOOKUP_COST;
DBUG_RETURN(read_time);
}
+IO_AND_CPU_COST ha_partition::rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST read_time= {0,0};
+ uint i;
+ uint partitions= bitmap_bits_set(&m_part_info->read_partitions);
+ if (partitions == 0)
+ return read_time;
+
+ ha_rows rows_per_part= (rows + partitions - 1)/partitions;
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+ {
+ IO_AND_CPU_COST cost= m_file[i]->rnd_pos_time(rows_per_part);
+ read_time.io+= cost.io;
+ read_time.cpu+= cost.cpu;
+ }
+ return read_time;
+}
+
+
/**
Find number of records in a range.
@param inx Index number
@@ -9844,6 +9900,8 @@ ha_rows ha_partition::records_in_range(uint inx, const key_range *min_key,
if (estimated_rows && checked_rows &&
checked_rows >= min_rows_to_check)
{
+ /* We cannot use page ranges when there is more than one partition */
+ *pages= unused_page_range;
DBUG_PRINT("info",
("records_in_range(inx %u): %lu (%lu * %lu / %lu)",
inx,
@@ -9857,6 +9915,8 @@ ha_rows ha_partition::records_in_range(uint inx, const key_range *min_key,
DBUG_PRINT("info", ("records_in_range(inx %u): %lu",
inx,
(ulong) estimated_rows));
+ /* We cannot use page ranges when there is more than one partition */
+ *pages= unused_page_range;
DBUG_RETURN(estimated_rows);
}
@@ -9887,33 +9947,6 @@ ha_rows ha_partition::estimate_rows_upper_bound()
}
-/*
- Get time to read
-
- SYNOPSIS
- read_time()
- index Index number used
- ranges Number of ranges
- rows Number of rows
-
- RETURN VALUE
- time for read
-
- DESCRIPTION
- This will be optimised later to include whether or not the index can
- be used with partitioning. To achieve we need to add another parameter
- that specifies how many of the index fields that are bound in the ranges.
- Possibly added as a new call to handlers.
-*/
-
-double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
-{
- DBUG_ENTER("ha_partition::read_time");
-
- DBUG_RETURN(get_open_file_sample()->read_time(index, ranges, rows));
-}
-
-
/**
Number of rows in table. see handler.h
@@ -10751,13 +10784,6 @@ int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
DBUG_RETURN(0);
}
- /*
- In Innodb we compare with either primary key value or global DB_ROW_ID so
- it is not possible that the two references are equal and are in different
- partitions, but in myisam it is possible since we are comparing offsets.
- Remove this assert if DB_ROW_ID is changed to be per partition.
- */
- DBUG_ASSERT(!m_innodb);
DBUG_RETURN(diff2 > diff1 ? -1 : 1);
}
@@ -12135,6 +12161,38 @@ ha_partition::can_convert_nocopy(const Field &field,
return true;
}
+/*
+ Get table costs for the current statement that should be stored in
+ handler->cost variables.
+
+ When we want to support many different table handlers, we should set
+  m_file[i]->costs to point to a unique cost structure per open
+  instance and call something similar to
+ TABLE_SHARE::update_optimizer_costs(handlerton *hton) and
+ handler::update_optimizer_costs(&costs) on it.
+*/
+
+
+void ha_partition::set_optimizer_costs(THD *thd)
+{
+ handler::set_optimizer_costs(thd);
+ for (uint i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+ m_file[i]->set_optimizer_costs(thd);
+}
+
+/*
+  Get unique table costs for the first instance of the handler and store
+  them in table->share
+*/
+
+void ha_partition::update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ uint i= bitmap_get_first_set(&m_part_info->read_partitions);
+ m_file[i]->update_optimizer_costs(costs);
+}
+
struct st_mysql_storage_engine partition_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 37529073391..86d8cdb7cee 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -919,7 +919,7 @@ public:
ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *mrr_mode,
+ uint *mrr_mode, ha_rows limit,
Cost_estimate *cost) override;
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
@@ -1031,17 +1031,15 @@ public:
/*
Called in test_quick_select to determine if indexes should be used.
*/
- double scan_time() override;
+ IO_AND_CPU_COST scan_time() override;
- double key_scan_time(uint inx) override;
+ IO_AND_CPU_COST key_scan_time(uint inx, ha_rows rows) override;
- double keyread_time(uint inx, uint ranges, ha_rows rows) override;
+ IO_AND_CPU_COST keyread_time(uint inx, ulong ranges, ha_rows rows,
+ ulonglong blocks) override;
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) override;
/*
- The next method will never be called if you do not implement indexes.
- */
- double read_time(uint index, uint ranges, ha_rows rows) override;
- /*
For the given range how many records are estimated to be in this range.
Used by optimiser to calculate cost of using a particular index.
*/
@@ -1637,5 +1635,8 @@ public:
bool can_convert_nocopy(const Field &field,
const Column_definition &new_field) const override;
+ void set_optimizer_costs(THD *thd) override;
+ void update_optimizer_costs(OPTIMIZER_COSTS *costs) override;
};
+
#endif /* HA_PARTITION_INCLUDED */
diff --git a/sql/handler.cc b/sql/handler.cc
index 8f318dcc961..595e76c708b 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -42,10 +42,12 @@
#include <pfs_transaction_provider.h>
#include <mysql/psi/mysql_transaction.h>
#include "debug_sync.h" // DEBUG_SYNC
+#include "debug.h" // debug_decrement_counter
#include "sql_audit.h"
#include "ha_sequence.h"
#include "rowid_filter.h"
#include "mysys_err.h"
+#include "optimizer_defaults.h"
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -621,8 +623,44 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
}
-const char *hton_no_exts[]= { 0 };
+/*
+ Get a pointer to the global engine optimizer costs (like
+ innodb.disk_read_cost) and store the pointer in the handlerton.
+
+ This is called once when a handlerton is created.
+  We also update the global costs that are not yet set with the default
+  costs so that information_schema can print the values actually used.
+*/
+
+static bool update_optimizer_costs(handlerton *hton)
+{
+ OPTIMIZER_COSTS costs= default_optimizer_costs;
+ LEX_CSTRING *name= hton_name(hton);
+
+ if (hton->update_optimizer_costs)
+ hton->update_optimizer_costs(&costs);
+
+ mysql_mutex_lock(&LOCK_optimizer_costs);
+ hton->optimizer_costs= get_or_create_optimizer_costs(name->str,
+ name->length);
+ if (!hton->optimizer_costs)
+ {
+ mysql_mutex_unlock(&LOCK_optimizer_costs);
+ return 1; // OOM
+ }
+
+  /* Update values that are not yet set from the current default costs */
+ for (uint i=0 ; i < sizeof(OPTIMIZER_COSTS)/sizeof(double) ; i++)
+ {
+ double *var= ((double*) hton->optimizer_costs)+i;
+ if (*var == OPTIMIZER_COST_UNDEF)
+ *var= ((double*) &costs)[i];
+ }
+ mysql_mutex_unlock(&LOCK_optimizer_costs);
+ return 0;
+}
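The loop above depends on OPTIMIZER_COSTS being a flat struct of doubles, so it can be walked as an array and every member still holding the OPTIMIZER_COST_UNDEF sentinel gets the default value. A self-contained sketch of that idiom (the struct and sentinel value below are made up for illustration):

/* Sketch of the "fill unset members" idiom; not the real OPTIMIZER_COSTS. */
#include <cstddef>

struct Costs { double read_cost, copy_cost, lookup_cost; };  // doubles only
static const double COST_UNDEF= -1.0;                        // assumed sentinel

static void fill_unset(Costs *dst, const Costs &defaults)
{
  double *d= reinterpret_cast<double*>(dst);
  const double *s= reinterpret_cast<const double*>(&defaults);
  for (size_t i= 0; i < sizeof(Costs)/sizeof(double); i++)
    if (d[i] == COST_UNDEF)
      d[i]= s[i];               // keep values the engine already provided
}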
+const char *hton_no_exts[]= { 0 };
int ha_initialize_handlerton(st_plugin_int *plugin)
{
@@ -725,6 +763,10 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
hton->savepoint_offset= savepoint_alloc_size;
savepoint_alloc_size+= tmp;
hton2plugin[hton->slot]=plugin;
+
+ if (!(hton->flags & HTON_HIDDEN) && update_optimizer_costs(hton))
+ goto err_deinit;
+
if (hton->prepare)
{
total_ha_2pc++;
@@ -764,7 +806,6 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
resolve_sysvar_table_options(hton);
update_discovery_counters(hton, 1);
-
DBUG_RETURN(0);
err_deinit:
@@ -3209,6 +3250,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
if (new_handler->ha_open(table, name, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED, mem_root))
goto err;
+ new_handler->set_optimizer_costs(ha_thd());
return new_handler;
@@ -3242,30 +3284,111 @@ LEX_CSTRING *handler::engine_name()
/*
- It is assumed that the value of the parameter 'ranges' can be only 0 or 1.
- If ranges == 1 then the function returns the cost of index only scan
- by index 'keyno' of one range containing 'rows' key entries.
- If ranges == 0 then the function returns only the cost of copying
- those key entries into the engine buffers.
+  Calculate the number of index blocks we are going to access when
+ doing 'ranges' index dives reading a total of 'rows' rows.
*/
-double handler::keyread_time(uint index, uint ranges, ha_rows rows)
+ulonglong handler::index_blocks(uint index, uint ranges, ha_rows rows)
{
- DBUG_ASSERT(ranges == 0 || ranges == 1);
- size_t len= table->key_info[index].key_length + ref_length;
- if (table->file->is_clustering_key(index))
- len= table->s->stored_rec_length;
- double cost= (double)rows*len/(stats.block_size+1)*IDX_BLOCK_COPY_COST;
- if (ranges)
+ if (!stats.block_size)
+ return 0; // No disk storage
+ size_t len= table->key_storage_length(index);
+ ulonglong blocks= (rows * len / INDEX_BLOCK_FILL_FACTOR_DIV *
+ INDEX_BLOCK_FILL_FACTOR_MUL) / stats.block_size + ranges;
+ return blocks * stats.block_size / IO_SIZE;
+}
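For a feel of the magnitudes produced by index_blocks(), here is a back-of-the-envelope version; the 75% fill factor and the 4K IO unit are assumptions standing in for the real INDEX_BLOCK_FILL_FACTOR_* and IO_SIZE constants:

/* Illustrative only: simplified index_blocks() with assumed constants. */
#include <cstdint>

uint64_t estimate_index_blocks(uint64_t rows, uint64_t key_len,
                               uint64_t block_size, uint64_t ranges)
{
  const uint64_t IO_UNIT= 4096;          // assumed IO_SIZE
  /* key bytes scaled up for an assumed 75% fill factor (rows*len / 3 * 4) */
  uint64_t blocks= (rows * key_len / 3 * 4) / block_size + ranges;
  return blocks * block_size / IO_UNIT;
  /* e.g. 1,000,000 rows * 20 bytes, 8K blocks, 1 range:
     ~26.7 MB of index -> 3,256 blocks -> 6,512 4K IO units */
}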
+
+
+/*
+ Calculate cost for an index scan for given index and number of records.
+
+ @param index Index to use
+  @param ranges   Number of ranges (b-tree dives in the case of a b-tree index).
+ Used by partition engine
+ @param rows Number of expected rows
+ @param blocks Number of disk blocks to read (from range optimizer).
+ 0 if not known
+
+  This function does not take into account looking up the key,
+  copying the key to the record or finding the next key. These costs are
+  handled in ha_keyread_time()
+*/
+
+IO_AND_CPU_COST handler::keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+{
+ IO_AND_CPU_COST cost;
+ ulonglong io_blocks= 0;
+ DBUG_ASSERT(ranges > 0);
+
+ /* memory engine has stats.block_size == 0 */
+ if (stats.block_size)
{
- uint keys_per_block= (uint) (stats.block_size*3/4/len+1);
- ulonglong blocks= (rows+ keys_per_block- 1)/keys_per_block;
- cost+= blocks;
+ if (!blocks)
+ {
+ /* Estimate length of index data */
+ if (rows <= 1) // EQ_REF optimization
+ {
+ blocks= 1;
+ io_blocks= (stats.block_size + IO_SIZE - 1)/ IO_SIZE;
+ }
+ else
+ {
+ size_t len= table->key_storage_length(index);
+ blocks= ((ulonglong) ((rows * len / INDEX_BLOCK_FILL_FACTOR_DIV *
+ INDEX_BLOCK_FILL_FACTOR_MUL +
+ stats.block_size-1)) / stats.block_size +
+ (ranges - 1));
+ io_blocks= blocks * stats.block_size / IO_SIZE;
+ }
+ }
+ else
+ io_blocks= blocks * stats.block_size / IO_SIZE;
}
+ cost.io= (double) io_blocks;
+ cost.cpu= blocks * INDEX_BLOCK_COPY_COST;
+ return cost;
+}
+
+
+/*
+ Cost of doing a set of range scans and finding the key position.
+ This function is used both with index scans (in which case there should be
+  an additional KEY_COPY_COST) and with a normal index + fetch row scan,
+  in which case there should be an additional rnd_pos_time() cost.
+*/
+
+IO_AND_CPU_COST handler::ha_keyread_time(uint index, ulong ranges,
+ ha_rows rows,
+ ulonglong blocks)
+{
+ if (rows < ranges)
+ rows= ranges;
+ IO_AND_CPU_COST cost= keyread_time(index, ranges, rows, blocks);
+ cost.cpu+= ranges * KEY_LOOKUP_COST + (rows - ranges) * KEY_NEXT_FIND_COST;
return cost;
}
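The CPU term added by ha_keyread_time() reads as: one key lookup per range dive plus one "find next" step for every additional row. A tiny sketch with placeholder constants (the real values come from the engine's OPTIMIZER_COSTS):

/* Sketch of the ha_keyread_time() CPU term; constants are placeholders. */
double keyread_cpu_term(double ranges, double rows)
{
  const double KEY_LOOKUP= 0.0004;     // assumed cost of one b-tree dive
  const double KEY_NEXT_FIND= 0.00008; // assumed cost of stepping to next key
  if (rows < ranges)
    rows= ranges;
  return ranges * KEY_LOOKUP + (rows - ranges) * KEY_NEXT_FIND;
}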
+/*
+ Read rows from a clustered index
+
+  Cost is similar to ha_rnd_pos_call_time() as an index_read() on a clustered
+  key follows the same code path as rnd_pos() (at least in InnoDB)
+*/
+
+IO_AND_CPU_COST
+handler::ha_keyread_clustered_time(uint index, ulong ranges,
+ ha_rows rows,
+ ulonglong blocks)
+{
+ if (rows < ranges)
+ rows= ranges;
+ IO_AND_CPU_COST cost= keyread_time(index, ranges, rows, blocks);
+ cost.cpu+= (ranges * ROW_LOOKUP_COST + (rows - ranges) * ROW_NEXT_FIND_COST);
+ return cost;
+}
+
THD *handler::ha_thd(void) const
{
DBUG_ASSERT(!table || !table->in_use || table->in_use == current_thd);
@@ -3338,7 +3461,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
name, ht->db_type, table_arg->db_stat, mode,
test_if_locked));
- table= table_arg;
+ set_table(table_arg);
DBUG_ASSERT(table->s == table_share);
DBUG_ASSERT(m_lock_type == F_UNLCK);
DBUG_PRINT("info", ("old m_lock_type: %d F_UNLCK %d", m_lock_type, F_UNLCK));
@@ -3374,7 +3497,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
m_psi= PSI_CALL_open_table(ha_table_share_psi(), this);
}
- if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
+ if (table_share->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
table->db_stat|=HA_READ_ONLY;
(void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
@@ -3388,9 +3511,24 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
else
dup_ref=ref+ALIGN_SIZE(ref_length);
cached_table_flags= table_flags();
+ /* Cache index flags */
+ for (uint index= 0 ; index < table_share->keys ; index++)
+ table->key_info[index].index_flags= index_flags(index, 0, 1);
+
+ if (!table_share->optimizer_costs_inited)
+ {
+ table_share->optimizer_costs_inited=1;
+ /* Copy data from global 'engine'.optimizer_costs to TABLE_SHARE */
+ table_share->update_optimizer_costs(partition_ht());
+    /* Update costs that depend on the table structure */
+ update_optimizer_costs(&table_share->optimizer_costs);
+ }
+
+ /* Copy current optimizer costs. Needed in case clone() is used */
+ reset_statistics();
}
- reset_statistics();
internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
+
DBUG_RETURN(error);
}
@@ -3418,6 +3556,15 @@ int handler::ha_close(void)
DBUG_RETURN(close());
}
+void handler::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
+{
+ DBUG_ASSERT(table_arg->s == share);
+ table= table_arg;
+ table_share= share;
+ costs= &share->optimizer_costs;
+ reset_statistics();
+}
+
int handler::ha_rnd_next(uchar *buf)
{
@@ -3427,6 +3574,15 @@ int handler::ha_rnd_next(uchar *buf)
m_lock_type != F_UNLCK);
DBUG_ASSERT(inited == RND);
+ DBUG_EXECUTE_IF("ha_rnd_next_error",
+ {
+ LEX_CSTRING user_var= { STRING_WITH_LEN("ha_rnd_next_error_counter") };
+ if (debug_decrement_counter(&user_var))
+ {
+ print_error(HA_ERR_WRONG_IN_RECORD,MYF(0));
+ DBUG_RETURN(HA_ERR_WRONG_IN_RECORD);
+ }
+ });
do
{
TABLE_IO_WAIT(tracker, PSI_TABLE_FETCH_ROW, MAX_KEY, result,
@@ -3450,6 +3606,9 @@ int handler::ha_rnd_next(uchar *buf)
}
table->status=result ? STATUS_NOT_FOUND: 0;
+
+ DEBUG_SYNC(ha_thd(), "handler_rnd_next_end");
+
DBUG_RETURN(result);
}
@@ -3672,7 +3831,7 @@ int handler::read_first_row(uchar * buf, uint primary_key)
TODO remove the test for HA_READ_ORDER
*/
if (stats.deleted < 10 || primary_key >= MAX_KEY ||
- !(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
+ !(table->key_info[primary_key].index_flags & HA_READ_ORDER))
{
if (likely(!(error= ha_rnd_init(1))))
{
@@ -6466,7 +6625,7 @@ bool Discovered_table_list::add_file(const char *fname)
{
bool is_temp= strncmp(fname, STRING_WITH_LEN(tmp_file_prefix)) == 0;
- if (is_temp && !with_temps)
+  if ((is_temp && !with_temps) ||
+      !strncmp(fname, STRING_WITH_LEN(ROCKSDB_DIRECTORY_NAME)))
return 0;
char tname[SAFE_NAME_LEN + 1];
@@ -6781,17 +6940,25 @@ extern "C" check_result_t handler_index_cond_check(void* h_arg)
check_result_t res;
DEBUG_SYNC(thd, "handler_index_cond_check");
- enum thd_kill_levels abort_at= h->has_rollback() ?
- THD_ABORT_SOFTLY : THD_ABORT_ASAP;
- if (thd_kill_level(thd) > abort_at)
- return CHECK_ABORTED_BY_USER;
- if (h->end_range && h->compare_key2(h->end_range) > 0)
+ enum thd_kill_levels killed= thd_kill_level(thd);
+ if (unlikely(killed != THD_IS_NOT_KILLED))
+ {
+ enum thd_kill_levels abort_at= (h->has_transactions() ?
+ THD_ABORT_SOFTLY :
+ THD_ABORT_ASAP);
+ if (killed > abort_at)
+ return CHECK_ABORTED_BY_USER;
+ }
+ if (unlikely(h->end_range) && h->compare_key2(h->end_range) > 0)
return CHECK_OUT_OF_RANGE;
h->increment_statistics(&SSV::ha_icp_attempts);
- if ((res= h->pushed_idx_cond->val_int()? CHECK_POS : CHECK_NEG) ==
- CHECK_POS)
- h->increment_statistics(&SSV::ha_icp_match);
+ res= CHECK_NEG;
+ if (h->pushed_idx_cond->val_int())
+ {
+ res= CHECK_POS;
+ h->fast_increment_statistics(&SSV::ha_icp_match);
+ }
return res;
}
@@ -6815,17 +6982,23 @@ check_result_t handler_rowid_filter_check(void *h_arg)
{
THD *thd= h->table->in_use;
DEBUG_SYNC(thd, "handler_rowid_filter_check");
- enum thd_kill_levels abort_at= h->has_transactions() ?
- THD_ABORT_SOFTLY : THD_ABORT_ASAP;
- if (thd_kill_level(thd) > abort_at)
- return CHECK_ABORTED_BY_USER;
+
+ enum thd_kill_levels killed= thd_kill_level(thd);
+ if (unlikely(killed != THD_IS_NOT_KILLED))
+ {
+ enum thd_kill_levels abort_at= (h->has_transactions() ?
+ THD_ABORT_SOFTLY :
+ THD_ABORT_ASAP);
+ if (killed > abort_at)
+ return CHECK_ABORTED_BY_USER;
+ }
if (h->end_range && h->compare_key2(h->end_range) > 0)
return CHECK_OUT_OF_RANGE;
}
h->position(tab->record[0]);
- return h->pushed_rowid_filter->check((char*)h->ref)? CHECK_POS: CHECK_NEG;
+ return h->pushed_rowid_filter->check((char*)h->ref) ? CHECK_POS: CHECK_NEG;
}
@@ -6836,8 +7009,7 @@ check_result_t handler_rowid_filter_check(void *h_arg)
extern "C" int handler_rowid_filter_is_active(void *h_arg)
{
- if (!h_arg)
- return false;
+ DBUG_ASSERT(h_arg);
handler *h= (handler*) h_arg;
return h->rowid_filter_is_active;
}
@@ -8748,3 +8920,21 @@ Table_scope_and_contents_source_st::fix_period_fields(THD *thd,
}
return false;
}
+
+/*
+  Copy upper level costs to the engine as part of starting a statement
+
+ This is needed to provide fast access to these variables during
+ optimization (as we refer to them multiple times during one query).
+
+ The other option would be to access them from THD, but that would
+ require a function call (as we cannot easily access THD from an
+ inline handler function) and two extra memory accesses for each
+ variable.
+*/
+
+void handler::set_optimizer_costs(THD *thd)
+{
+ optimizer_where_cost= thd->variables.optimizer_where_cost;
+ optimizer_scan_setup_cost= thd->variables.optimizer_scan_setup_cost;
+}
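A minimal sketch of what handler::set_optimizer_costs() amounts to, per the comment above: copy the two session variables into plain members once per statement so the optimizer's hot path reads a double instead of dereferencing THD (types and names below are illustrative, not the server API):

/* Illustrative only: per-statement caching of session cost variables. */
struct SessionCostVars { double where_cost; double scan_setup_cost; };

class HandlerCosts
{
  double cached_where_cost;
  double cached_scan_setup_cost;
public:
  void set_for_statement(const SessionCostVars &vars)
  {
    cached_where_cost= vars.where_cost;
    cached_scan_setup_cost= vars.scan_setup_cost;
  }
  double where_cost() const { return cached_where_cost; }
  double scan_setup_cost() const { return cached_scan_setup_cost; }
};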
diff --git a/sql/handler.h b/sql/handler.h
index 77c77c83c0f..0810d1a503a 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -28,6 +28,7 @@
#include "sql_const.h"
#include "sql_basic_types.h"
#include "mysqld.h" /* server_id */
+#include "optimizer_costs.h"
#include "sql_plugin.h" /* plugin_ref, st_plugin_int, plugin */
#include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA */
#include "sql_cache.h"
@@ -35,6 +36,7 @@
#include "sql_array.h" /* Dynamic_array<> */
#include "mdl.h"
#include "vers_string.h"
+#include "optimizer_costs.h"
#include "sql_analyze_stmt.h" // for Exec_time_tracker
@@ -1045,6 +1047,7 @@ enum enum_schema_tables
SCH_KEY_CACHES,
SCH_KEY_COLUMN_USAGE,
SCH_OPEN_TABLES,
+ SCH_OPTIMIZER_COSTS,
SCH_OPT_TRACE,
SCH_PARAMETERS,
SCH_PARTITIONS,
@@ -1495,6 +1498,10 @@ struct handlerton
/* Called for all storage handlers after ddl recovery is done */
void (*signal_ddl_recovery_done)(handlerton *hton);
+ /* Called at startup to update default engine costs */
+ void (*update_optimizer_costs)(OPTIMIZER_COSTS *costs);
+ void *optimizer_costs; /* Costs are stored here */
+
/*
Optional clauses in the CREATE/ALTER TABLE
*/
@@ -2770,113 +2777,121 @@ typedef struct st_range_seq_if
typedef bool (*SKIP_INDEX_TUPLE_FUNC) (range_seq_t seq, range_id_t range_info);
+#define MARIADB_NEW_COST_MODEL 1
+/* Separated costs for IO and CPU */
+
+struct IO_AND_CPU_COST
+{
+ double io;
+ double cpu;
+
+ void add(IO_AND_CPU_COST cost)
+ {
+ io+= cost.io;
+ cpu+= cost.cpu;
+ }
+};
+
+/* Cost for reading a row through an index */
+struct ALL_READ_COST
+{
+ IO_AND_CPU_COST index_cost, row_cost;
+ longlong max_index_blocks, max_row_blocks;
+ /* index_only_read = index_cost + copy_cost */
+ double copy_cost;
+
+ void reset()
+ {
+ row_cost= {0,0};
+ index_cost= {0,0};
+ max_index_blocks= max_row_blocks= 0;
+ copy_cost= 0.0;
+ }
+};
+
+
class Cost_estimate
{
public:
- double io_count; /* number of I/O to fetch records */
double avg_io_cost; /* cost of an average I/O oper. to fetch records */
- double idx_io_count; /* number of I/O to read keys */
- double idx_avg_io_cost; /* cost of an average I/O oper. to fetch records */
- double cpu_cost; /* total cost of operations in CPU */
- double idx_cpu_cost; /* cost of operations in CPU for index */
- double import_cost; /* cost of remote operations */
- double mem_cost; /* cost of used memory */
-
- static constexpr double IO_COEFF= 1;
- static constexpr double CPU_COEFF= 1;
- static constexpr double MEM_COEFF= 1;
- static constexpr double IMPORT_COEFF= 1;
+ double cpu_cost; /* Cpu cost unrelated to engine costs */
+ double comp_cost; /* Cost of comparing found rows with WHERE clause */
+ double copy_cost; /* Copying the data to 'record' */
+ double limit_cost; /* Total cost when restricting rows with limit */
+
+ IO_AND_CPU_COST index_cost;
+ IO_AND_CPU_COST row_cost;
Cost_estimate()
{
reset();
}
+ /*
+ Total cost for the range
+ Note that find_cost() + compare_cost() + data_copy_cost() == total_cost()
+ */
+
double total_cost() const
{
- return IO_COEFF*io_count*avg_io_cost +
- IO_COEFF*idx_io_count*idx_avg_io_cost +
- CPU_COEFF*(cpu_cost + idx_cpu_cost) +
- MEM_COEFF*mem_cost + IMPORT_COEFF*import_cost;
+ return ((index_cost.io + row_cost.io) * avg_io_cost+
+ index_cost.cpu + row_cost.cpu + comp_cost + copy_cost +
+ cpu_cost);
}
- double index_only_cost()
+ /* Cost for just fetching and copying a row (no compare costs) */
+ double fetch_cost() const
{
- return IO_COEFF*idx_io_count*idx_avg_io_cost +
- CPU_COEFF*idx_cpu_cost;
+ return ((index_cost.io + row_cost.io) * avg_io_cost+
+ index_cost.cpu + row_cost.cpu + copy_cost);
}
- /**
- Whether or not all costs in the object are zero
-
- @return true if all costs are zero, false otherwise
+ /*
+ Cost of copying the row or key to 'record'
*/
- bool is_zero() const
- {
- return io_count == 0.0 && idx_io_count == 0.0 && cpu_cost == 0.0 &&
- import_cost == 0.0 && mem_cost == 0.0;
- }
-
- void reset()
+ inline double data_copy_cost() const
{
- avg_io_cost= 1.0;
- idx_avg_io_cost= 1.0;
- io_count= idx_io_count= cpu_cost= idx_cpu_cost= mem_cost= import_cost= 0.0;
+ return copy_cost;
}
- void multiply(double m)
+ /*
+    Multiply costs to simulate a scan that is repeated n times.
+    We assume that IO blocks will be cached and that memory is only
+    allocated once; there is also no import cost that has to be
+    paid multiple times
+ */
+ void multiply(uint n)
{
- io_count *= m;
- cpu_cost *= m;
- idx_io_count *= m;
- idx_cpu_cost *= m;
- import_cost *= m;
- /* Don't multiply mem_cost */
+ index_cost.io*= n;
+ index_cost.cpu*= n;
+ row_cost.io*= n;
+ row_cost.cpu*= n;
+ copy_cost*= n;
+ comp_cost*= n;
+ cpu_cost*= n;
}
- void add(const Cost_estimate* cost)
+ void add(Cost_estimate *cost)
{
- if (cost->io_count != 0.0)
- {
- double io_count_sum= io_count + cost->io_count;
- avg_io_cost= (io_count * avg_io_cost +
- cost->io_count * cost->avg_io_cost)
- /io_count_sum;
- io_count= io_count_sum;
- }
- if (cost->idx_io_count != 0.0)
- {
- double idx_io_count_sum= idx_io_count + cost->idx_io_count;
- idx_avg_io_cost= (idx_io_count * idx_avg_io_cost +
- cost->idx_io_count * cost->idx_avg_io_cost)
- /idx_io_count_sum;
- idx_io_count= idx_io_count_sum;
- }
- cpu_cost += cost->cpu_cost;
- idx_cpu_cost += cost->idx_cpu_cost;
- import_cost += cost->import_cost;
+ avg_io_cost= cost->avg_io_cost;
+ index_cost.io+= cost->index_cost.io;
+ index_cost.cpu+= cost->index_cost.cpu;
+ row_cost.io+= cost->row_cost.io;
+ row_cost.cpu+= cost->row_cost.cpu;
+ copy_cost+= cost->copy_cost;
+ comp_cost+= cost->comp_cost;
+ cpu_cost+= cost->cpu_cost;
}
- void add_io(double add_io_cnt, double add_avg_cost)
+ inline void reset()
{
- /* In edge cases add_io_cnt may be zero */
- if (add_io_cnt > 0)
- {
- double io_count_sum= io_count + add_io_cnt;
- avg_io_cost= (io_count * avg_io_cost +
- add_io_cnt * add_avg_cost) / io_count_sum;
- io_count= io_count_sum;
- }
+ avg_io_cost= 0;
+ comp_cost= cpu_cost= 0.0;
+ copy_cost= limit_cost= 0.0;
+ index_cost= {0,0};
+ row_cost= {0,0};
}
-
- /// Add to CPU cost
- void add_cpu(double add_cpu_cost) { cpu_cost+= add_cpu_cost; }
-
- /// Add to import cost
- void add_import(double add_import_cost) { import_cost+= add_import_cost; }
-
- /// Add to memory cost
- void add_mem(double add_mem_cost) { mem_cost+= add_mem_cost; }
+ inline void reset(handler *file);
/*
To be used when we go from old single value-based cost calculations to
@@ -2885,13 +2900,10 @@ public:
void convert_from_cost(double cost)
{
reset();
- io_count= cost;
+ cpu_cost= cost;
}
};
-void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
- Cost_estimate *cost);
-
/*
Indicates that all scanned ranges will be singlepoint (aka equality) ranges.
The ranges may not use the full key but all of them will use the same number
@@ -3065,6 +3077,7 @@ enum class Compare_keys : uint32_t
NotEqual
};
+
/**
The handler class is the interface for dynamically loadable
storage engines. Do not add ifdefs and take care when adding or
@@ -3125,9 +3138,10 @@ protected:
ha_rows estimation_rows_to_insert;
handler *lookup_handler;
public:
- handlerton *ht; /* storage engine of this handler */
- uchar *ref; /* Pointer to current row */
- uchar *dup_ref; /* Pointer to duplicate row */
+ handlerton *ht; /* storage engine of this handler */
+ OPTIMIZER_COSTS *costs; /* Points to table->share->costs */
+ uchar *ref; /* Pointer to current row */
+ uchar *dup_ref; /* Pointer to duplicate row */
uchar *lookup_buffer;
ha_statistics stats;
@@ -3138,6 +3152,7 @@ public:
HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
/** Current range (the one we're now returning rows from) */
+
KEY_MULTI_RANGE mrr_cur_range;
/** The following are for read_range() */
@@ -3318,13 +3333,15 @@ private:
For non partitioned handlers this is &TABLE_SHARE::ha_share.
*/
Handler_share **ha_share;
+  double optimizer_where_cost;      // Copy of THD->...optimizer_where_cost
+  double optimizer_scan_setup_cost; // Copy of THD->...optimizer_scan_...
public:
handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), table(0),
estimation_rows_to_insert(0),
lookup_handler(this),
- ht(ht_arg), ref(0), lookup_buffer(NULL), end_range(NULL),
+ ht(ht_arg), costs(0), ref(0), lookup_buffer(NULL), end_range(NULL),
implicit_emptied(0),
mark_trx_read_write_done(0),
check_table_binlog_row_based_done(0),
@@ -3348,12 +3365,19 @@ public:
m_psi_numrows(0),
m_psi_locker(NULL),
row_logging(0), row_logging_init(0),
- m_lock_type(F_UNLCK), ha_share(NULL)
+ m_lock_type(F_UNLCK), ha_share(NULL), optimizer_where_cost(0),
+ optimizer_scan_setup_cost(0)
{
DBUG_PRINT("info",
("handler created F_UNLCK %d F_RDLCK %d F_WRLCK %d",
F_UNLCK, F_RDLCK, F_WRLCK));
reset_statistics();
+ /*
+ The following variables should be updated in set_optimizer_costs()
+ which is to be run as part of setting up the table for the query
+ */
+ MEM_UNDEFINED(&optimizer_where_cost, sizeof(optimizer_where_cost));
+ MEM_UNDEFINED(&optimizer_scan_setup_cost, sizeof(optimizer_scan_setup_cost));
}
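The MEM_UNDEFINED calls above appear to mark the cached cost members as uninitialized for memory-checking tools, so any read before set_optimizer_costs() runs gets flagged. A hedged sketch of the same idea using the Valgrind client request directly (requires the Valgrind headers; MariaDB wraps this in its own macro):

/* Sketch only: poisoning a member so Memcheck reports reads before init. */
#include <valgrind/memcheck.h>    // provides VALGRIND_MAKE_MEM_UNDEFINED

struct CostCache
{
  double where_cost;
  CostCache()
  {
    /* Storage exists but must not be used until it is explicitly set */
    (void) VALGRIND_MAKE_MEM_UNDEFINED(&where_cost, sizeof(where_cost));
  }
  void init(double v) { where_cost= v; }
};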
virtual ~handler(void)
{
@@ -3456,14 +3480,15 @@ public:
int ha_delete_row(const uchar * buf);
void ha_release_auto_increment();
- bool keyread_enabled() { return keyread < MAX_KEY; }
- int ha_start_keyread(uint idx)
+ inline bool keyread_enabled() { return keyread < MAX_KEY; }
+ inline int ha_start_keyread(uint idx)
{
- int res= keyread_enabled() ? 0 : extra_opt(HA_EXTRA_KEYREAD, idx);
+ if (keyread_enabled())
+ return 0;
keyread= idx;
- return res;
+ return extra_opt(HA_EXTRA_KEYREAD, idx);
}
- int ha_end_keyread()
+ inline int ha_end_keyread()
{
if (!keyread_enabled())
return 0;
@@ -3554,51 +3579,250 @@ public:
bzero(&copy_info, sizeof(copy_info));
reset_copy_info();
}
- virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
+ virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
+
+ inline double io_cost(IO_AND_CPU_COST cost)
{
- table= table_arg;
- table_share= share;
- reset_statistics();
+ return cost.io * DISK_READ_COST * DISK_READ_RATIO;
}
- virtual double scan_time()
+
+ inline double cost(IO_AND_CPU_COST cost)
+ {
+ return io_cost(cost) + cost.cpu;
+ }
+
+ /*
+    Calculate the cost while capping io_blocks to the given maximum.
+    This is done here instead of earlier to allow filtering to work
+    with the original io_block counts.
+ */
+ inline double cost(ALL_READ_COST *cost)
{
- return ((ulonglong2double(stats.data_file_length) / stats.block_size + 2) *
- avg_io_cost());
+ double blocks= (MY_MIN(cost->index_cost.io,(double) cost->max_index_blocks) +
+ MY_MIN(cost->row_cost.io, (double) cost->max_row_blocks));
+ return ((cost->index_cost.cpu + cost->row_cost.cpu + cost->copy_cost) +
+ blocks * DISK_READ_COST * DISK_READ_RATIO);
}
- virtual double key_scan_time(uint index)
+ /*
+    Calculate the cost when we are going to execute the given read method
+ multiple times
+ */
+ inline double cost_for_reading_multiple_times(double multiple,
+ ALL_READ_COST *cost)
+
{
- return keyread_time(index, 1, records());
+ double blocks= (MY_MIN(cost->index_cost.io * multiple,
+ (double) cost->max_index_blocks) +
+ MY_MIN(cost->row_cost.io * multiple,
+ (double) cost->max_row_blocks));
+ return ((cost->index_cost.cpu + cost->row_cost.cpu + cost->copy_cost) *
+ multiple +
+ blocks * DISK_READ_COST * DISK_READ_RATIO);
}
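The cost() helpers above fold the split IO/CPU estimate into one scalar: IO blocks are first capped by the maximum number of blocks the index or table can occupy, then weighted by the disk read cost and the cache-miss ratio. A self-contained sketch with assumed constants (the real defaults come from the optimizer cost variables):

/* Illustrative only: folding split IO/CPU costs into one number. */
#include <algorithm>

struct IoCpu { double io, cpu; };

double fold_cost(IoCpu index_cost, IoCpu row_cost,
                 double max_index_blocks, double max_row_blocks,
                 double copy_cost)
{
  const double DISK_READ= 10.24;   // assumed cost of one 4K read
  const double MISS_RATIO= 0.02;   // assumed fraction of reads hitting disk
  double blocks= std::min(index_cost.io, max_index_blocks) +
                 std::min(row_cost.io, max_row_blocks);
  return index_cost.cpu + row_cost.cpu + copy_cost +
         blocks * DISK_READ * MISS_RATIO;
}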
- virtual double avg_io_cost()
+ inline ulonglong row_blocks()
{
- return 1.0;
+ return (stats.data_file_length + IO_SIZE-1) / IO_SIZE;
}
- /**
- The cost of reading a set of ranges from the table using an index
- to access it.
-
- @param index The index number.
- @param ranges The number of ranges to be read. If 0, it means that
- we calculate separately the cost of reading the key.
- @param rows Total number of rows to be read.
-
- This method can be used to calculate the total cost of scanning a table
- using an index by calling it using read_time(index, 1, table_size).
+ virtual ulonglong index_blocks(uint index, uint ranges, ha_rows rows);
+
+ inline ulonglong index_blocks(uint index)
+ {
+ return index_blocks(index, 1, stats.records);
+ }
+
+ /*
+    Time for a full table data scan. To be overridden by engines; should not
+    be used by the SQL level.
+ */
+protected:
+ virtual IO_AND_CPU_COST scan_time()
+ {
+ IO_AND_CPU_COST cost;
+ ulonglong length= stats.data_file_length;
+ cost.io= (double) (length / IO_SIZE);
+ cost.cpu= (!stats.block_size ? 0.0 :
+ (double) ((length + stats.block_size-1)/stats.block_size) *
+ INDEX_BLOCK_COPY_COST);
+ return cost;
+ }
+public:
+
+ /*
+ Time for a full table scan
+
+    @param rows      Number of records from the engine or records from
+ status tables stored by ANALYZE TABLE.
+
+ The TABLE_SCAN_SETUP_COST is there to prefer range scans to full
+    table scans.  This is mainly to make the test suite happy as
+    many tests have very few rows.  In real life tables have more than
+    a few rows and the extra cost has no practical effect.
+ */
+
+ inline IO_AND_CPU_COST ha_scan_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost= scan_time();
+ cost.cpu+= (TABLE_SCAN_SETUP_COST +
+ (double) rows * (ROW_NEXT_FIND_COST + ROW_COPY_COST));
+ return cost;
+ }
+
+ /*
+ Time for a full table scan, fetching the rows from the table and comparing
+ the row with the where clause
*/
- virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return rows2double(ranges+rows); }
+ inline IO_AND_CPU_COST ha_scan_and_compare_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost= ha_scan_time(rows);
+ cost.cpu+= (double) rows * WHERE_COST;
+ return cost;
+ }
+
+ /*
+ Update table->share optimizer costs for this particular table.
+    Called once when the table is opened for the first time.
+ */
+ virtual void update_optimizer_costs(OPTIMIZER_COSTS *costs) {}
+
+ /*
+ Set handler optimizer cost variables.
+    Called for each table used by the statement.
+ This is virtual mainly for the partition engine.
+ */
+ virtual void set_optimizer_costs(THD *thd);
+
+protected:
+ /*
+ Cost of reading 'rows' number of rows with a rowid
+ */
+ virtual IO_AND_CPU_COST rnd_pos_time(ha_rows rows)
+ {
+ double r= rows2double(rows);
+ return
+ {
+ r * ((stats.block_size + IO_SIZE -1 )/IO_SIZE), // Blocks read
+ r * INDEX_BLOCK_COPY_COST // Copy block from cache
+ };
+ }
+public:
+
+ /*
+    Time for doing an internal rnd_pos() inside the engine.  For some
+    engines, this is more efficient than the SQL layer calling
+ rnd_pos() as there is no overhead in converting/checking the
+ rnd_pos_value. This is used when calculating the cost of fetching
+ a key+row in one go (like when scanning an index and fetching the
+ row).
+ */
+
+ inline IO_AND_CPU_COST ha_rnd_pos_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost= rnd_pos_time(rows);
+ set_if_smaller(cost.io, (double) row_blocks());
+ cost.cpu+= rows2double(rows) * (ROW_LOOKUP_COST + ROW_COPY_COST);
+ return cost;
+ }
+
+ /*
+    This is the cost when we are calling rnd_pos() explicitly.
+    For the moment this function is identical to ha_rnd_pos_time(),
+ but that may change in the future after we do more cost checks for
+ more engines.
+ */
+ inline IO_AND_CPU_COST ha_rnd_pos_call_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost= rnd_pos_time(rows);
+ set_if_smaller(cost.io, (double) row_blocks());
+ cost.cpu+= rows2double(rows) * (ROW_LOOKUP_COST + ROW_COPY_COST);
+ return cost;
+ }
+
+ inline IO_AND_CPU_COST ha_rnd_pos_call_and_compare_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost;
+ cost= ha_rnd_pos_call_time(rows);
+ cost.cpu+= rows2double(rows) * WHERE_COST;
+ return cost;
+ }
/**
- Calculate cost of 'keyread' scan for given index and number of records.
+ Calculate cost of 'index_only' scan for given index, a number of ranges
+ and number of records.
+
+    @param index    Index to read
+    @param ranges   Number of ranges to read
+    @param rows     #of records to read
+    @param blocks   Number of IO blocks that need to be accessed.
+                    0 if not known (in which case it's calculated)
+ */
+protected:
+ virtual IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks);
+public:
+
+ /*
+ Calculate cost of 'keyread' scan for given index and number of records
+ including fetching the key to the 'record' buffer.
+ */
+ IO_AND_CPU_COST ha_keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks);
+
+  /* Same as above, but take into account copying the key to the SQL layer */
+ inline IO_AND_CPU_COST ha_keyread_and_copy_time(uint index, ulong ranges,
+ ha_rows rows,
+ ulonglong blocks)
+ {
+ IO_AND_CPU_COST cost= ha_keyread_time(index, ranges, rows, blocks);
+ cost.cpu+= (double) rows * KEY_COPY_COST;
+ return cost;
+ }
+
+ inline IO_AND_CPU_COST ha_keyread_and_compare_time(uint index, ulong ranges,
+ ha_rows rows,
+ ulonglong blocks)
+ {
+ IO_AND_CPU_COST cost= ha_keyread_time(index, ranges, rows, blocks);
+ cost.cpu+= (double) rows * (KEY_COPY_COST + WHERE_COST);
+ return cost;
+ }
+
+ IO_AND_CPU_COST ha_keyread_clustered_time(uint index,
+ ulong ranges,
+ ha_rows rows,
+ ulonglong blocks);
+ /*
+ Time for a full table index scan (without copy or compare cost).
+    To be overridden by engines; the SQL level should use ha_key_scan_time().
+ Note that IO_AND_CPU_COST does not include avg_io_cost() !
+ */
+protected:
+ virtual IO_AND_CPU_COST key_scan_time(uint index, ha_rows rows)
+ {
+ return keyread_time(index, 1, MY_MAX(rows, 1), 0);
+ }
+public:
+
+ /* Cost of doing a full index scan */
+ inline IO_AND_CPU_COST ha_key_scan_time(uint index, ha_rows rows)
+ {
+ IO_AND_CPU_COST cost= key_scan_time(index, rows);
+ cost.cpu+= (INDEX_SCAN_SETUP_COST + KEY_LOOKUP_COST +
+ (double) rows * (KEY_NEXT_FIND_COST + KEY_COPY_COST));
+ return cost;
+ }
- @param index index to read
- @param ranges #of ranges to read
- @param rows #of records to read
+ /*
+ Cost of doing a full index scan with record copy and compare
+ @param rows Rows from stat tables
*/
- virtual double keyread_time(uint index, uint ranges, ha_rows rows);
+ inline IO_AND_CPU_COST ha_key_scan_and_compare_time(uint index, ha_rows rows)
+ {
+ IO_AND_CPU_COST cost= ha_key_scan_time(index, rows);
+ cost.cpu+= (double) rows * WHERE_COST;
+ return cost;
+ }
virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; }
@@ -3903,7 +4127,7 @@ public:
virtual ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *mrr_mode,
+ uint *mrr_mode, ha_rows limit,
Cost_estimate *cost);
virtual ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
@@ -3912,6 +4136,13 @@ public:
uint n_ranges, uint mrr_mode,
HANDLER_BUFFER *buf);
virtual int multi_range_read_next(range_id_t *range_info);
+private:
+ inline void calculate_costs(Cost_estimate *cost, uint keyno,
+ uint ranges, uint multi_row_ranges, uint flags,
+ ha_rows total_rows,
+ ulonglong io_blocks,
+ ulonglong unassigned_single_point_ranges);
+public:
/*
Return string representation of the MRR plan.
@@ -4318,6 +4549,7 @@ public:
For a clustered (primary) key, the following should also hold:
index_flags() should contain HA_CLUSTERED_INDEX
+ index_flags() should not contain HA_KEYREAD_ONLY or HA_DO_RANGE_FILTER_PUSHDOWN
table_flags() should contain HA_TABLE_SCAN_ON_INDEX
For a reference key the following should also hold:
@@ -4328,20 +4560,9 @@ public:
*/
/* The following code is for primary keys */
- bool pk_is_clustering_key(uint index) const
- {
- /*
- We have to check for MAX_INDEX as table->s->primary_key can be
- MAX_KEY in the case where there is no primary key.
- */
- return index != MAX_KEY && is_clustering_key(index);
- }
+ inline bool pk_is_clustering_key(uint index) const;
/* Same as before but for other keys, in which case we can skip the check */
- bool is_clustering_key(uint index) const
- {
- DBUG_ASSERT(index != MAX_KEY);
- return (index_flags(index, 0, 1) & HA_CLUSTERED_INDEX);
- }
+ inline bool is_clustering_key(uint index) const;
virtual int cmp_ref(const uchar *ref1, const uchar *ref2)
{
@@ -4424,7 +4645,11 @@ public:
virtual void cancel_pushed_rowid_filter()
{
pushed_rowid_filter= NULL;
- rowid_filter_is_active= false;
+ if (rowid_filter_is_active)
+ {
+ rowid_filter_is_active= false;
+ rowid_filter_changed();
+ }
}
virtual void disable_pushed_rowid_filter()
@@ -4432,10 +4657,14 @@ public:
DBUG_ASSERT(pushed_rowid_filter != NULL &&
save_pushed_rowid_filter == NULL);
save_pushed_rowid_filter= pushed_rowid_filter;
- if (rowid_filter_is_active)
- save_rowid_filter_is_active= rowid_filter_is_active;
+ save_rowid_filter_is_active= rowid_filter_is_active;
pushed_rowid_filter= NULL;
- rowid_filter_is_active= false;
+
+ if (rowid_filter_is_active)
+ {
+ rowid_filter_is_active= false;
+ rowid_filter_changed();
+ }
}
virtual void enable_pushed_rowid_filter()
@@ -4443,12 +4672,17 @@ public:
DBUG_ASSERT(save_pushed_rowid_filter != NULL &&
pushed_rowid_filter == NULL);
pushed_rowid_filter= save_pushed_rowid_filter;
+ save_pushed_rowid_filter= NULL;
if (save_rowid_filter_is_active)
+ {
rowid_filter_is_active= true;
- save_pushed_rowid_filter= NULL;
+ rowid_filter_changed();
+ }
}
virtual bool rowid_filter_push(Rowid_filter *rowid_filter) { return true; }
+ /* Signal that rowid filter may have been enabled / disabled */
+ virtual void rowid_filter_changed() {}
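The disable/enable pair above now calls rowid_filter_changed() only when the active state really flips, so an engine can invalidate whatever it derived from the filter. A stripped-down sketch of the same save/restore-with-notification flow (made-up names; only the control flow mirrors the handler code):

/* Illustrative only: save/restore of a pushed filter with a change hook. */
struct FilterState
{
  const void *pushed= nullptr, *saved= nullptr;
  bool active= false, saved_active= false;

  void on_filter_changed() { /* engine-specific invalidation hook */ }

  void disable()
  {
    saved= pushed;
    saved_active= active;
    pushed= nullptr;
    if (active)
    {
      active= false;
      on_filter_changed();      // only notify on a real state change
    }
  }
  void enable()
  {
    pushed= saved;
    saved= nullptr;
    if (saved_active)
    {
      active= true;
      on_filter_changed();
    }
  }
};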
/* Needed for partition / spider */
virtual TABLE_LIST *get_next_global_for_child() { return NULL; }
@@ -4768,7 +5002,6 @@ private:
}
}
-private:
void mark_trx_read_write_internal();
bool check_table_binlog_row_based_internal();
@@ -4786,7 +5019,13 @@ protected:
However, engines that implement read_range_XXX() (like MariaRocks)
or embed other engines (like ha_partition) may need to call these also
*/
+ /*
+    Increment statistics. As a side effect it increases accessed_rows_and_keys
+    and checks whether lex->limit_rows_examined_cnt has been reached
+ */
inline void increment_statistics(ulong SSV::*offset) const;
+ /* Same as increment_statistics but doesn't increase accessed_rows_and_keys */
+ inline void fast_increment_statistics(ulong SSV::*offset) const;
inline void decrement_statistics(ulong SSV::*offset) const;
private:
@@ -5045,7 +5284,7 @@ public:
ha_share= arg_ha_share;
return false;
}
- void set_table(TABLE* table_arg) { table= table_arg; }
+ inline void set_table(TABLE* table_arg);
int get_lock_type() const { return m_lock_type; }
public:
/* XXX to be removed, see ha_partition::partition_ht() */
@@ -5117,6 +5356,12 @@ protected:
void set_ha_share_ptr(Handler_share *arg_ha_share);
void lock_shared_ha_data();
void unlock_shared_ha_data();
+
+ /*
+    Mroonga needs to call some xxx_time() directly for its internal handler
+ methods
+ */
+ friend class ha_mroonga;
};
#include "multi_range_read.h"
@@ -5391,4 +5636,10 @@ uint ha_count_rw_2pc(THD *thd, bool all);
uint ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list,
bool all);
+inline void Cost_estimate::reset(handler *file)
+{
+ reset();
+ avg_io_cost= file->DISK_READ_COST * file->DISK_READ_RATIO;
+}
+
#endif /* HANDLER_INCLUDED */
diff --git a/sql/item.cc b/sql/item.cc
index 298da95905a..92f3d55fcf9 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -42,6 +42,7 @@
// RESOLVED_AGAINST_ALIAS, ...
#include "sql_expression_cache.h"
#include "sql_lex.h" // empty_clex_str
+#include "my_json_writer.h" // for dbug_print_opt_trace()
const String my_null_string("NULL", 4, default_charset_info);
const String my_default_string("DEFAULT", 7, default_charset_info);
@@ -411,7 +412,7 @@ int Item::save_str_value_in_field(Field *field, String *result)
Item::Item(THD *thd):
- name(null_clex_str), orig_name(0), is_expensive_cache(-1)
+ name(null_clex_str), orig_name(null_clex_str), is_expensive_cache(-1)
{
DBUG_ASSERT(thd);
base_flags= item_base_t::FIXED;
@@ -444,7 +445,7 @@ Item::Item(THD *thd):
*/
Item::Item():
- name(null_clex_str), orig_name(0), is_expensive_cache(-1)
+ name(null_clex_str), orig_name(null_clex_str), is_expensive_cache(-1)
{
DBUG_ASSERT(!mysqld_server_started); // Created early
base_flags= item_base_t::FIXED;
@@ -552,11 +553,8 @@ void Item::cleanup()
DBUG_PRINT("enter", ("this: %p", this));
marker= MARKER_UNUSED;
join_tab_idx= MAX_TABLES;
- if (orig_name)
- {
- name.str= orig_name;
- name.length= strlen(orig_name);
- }
+ if (orig_name.str)
+ name= orig_name;
DBUG_VOID_RETURN;
}
@@ -6348,6 +6346,24 @@ Item_equal *Item_field::find_item_equal(COND_EQUAL *cond_equal)
}
+/*
+  Check if field is equal to the current field or any of the fields in
+ item_equal
+*/
+
+bool Item_field::contains(Field *field_arg)
+{
+ if (field == field_arg)
+ return 1;
+ /*
+    Check if there is a multiple equality that allows us to infer that field
+ (see also: compute_part_of_sort_key_for_equals)
+ */
+ if (item_equal && item_equal->contains(field_arg))
+ return 1;
+ return 0;
+}
+
/**
Set a pointer to the multiple equality the field reference belongs to
(if any).
@@ -10872,6 +10888,29 @@ const char *dbug_print_item(Item *item)
return "Couldn't fit into buffer";
}
+
+/*
+ Return the optimizer trace collected so far for the current thread.
+*/
+
+const char *dbug_print_opt_trace()
+{
+ if (current_thd)
+ {
+ if (current_thd->opt_trace.is_started())
+ {
+ String *s= const_cast<String *>(current_thd->opt_trace
+ .get_current_json()->output.get_string());
+ return s->c_ptr();
+ }
+ else
+ return "Trace empty";
+ }
+ else
+ return "No Thread";
+}
+
+
const char *dbug_print_select(SELECT_LEX *sl)
{
char *buf= dbug_item_print_buf;
diff --git a/sql/item.h b/sql/item.h
index 163c000f46c..5956b810d51 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1035,7 +1035,7 @@ public:
LEX_CSTRING name; /* Name of item */
/* Original item name (if it was renamed)*/
- const char *orig_name;
+ LEX_CSTRING orig_name;
/* All common bool variables for an Item is stored here */
item_base_t base_flags;
@@ -3808,6 +3808,7 @@ public:
Item_equal *get_item_equal() override { return item_equal; }
void set_item_equal(Item_equal *item_eq) override { item_equal= item_eq; }
Item_equal *find_item_equal(COND_EQUAL *cond_equal) override;
+ bool contains(Field *field);
Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *) override;
Item *replace_equal_field(THD *thd, uchar *arg) override;
uint32 max_display_length() const override
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 67d7d03c0bf..80228917210 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -42,6 +42,7 @@
#include "sql_parse.h" // check_stack_overrun
#include "sql_cte.h"
#include "sql_test.h"
+#include "opt_trace.h"
double get_post_group_estimate(JOIN* join, double join_op_rows);
@@ -1588,8 +1589,9 @@ Item_exists_subselect::Item_exists_subselect(THD *thd,
{
DBUG_ENTER("Item_exists_subselect::Item_exists_subselect");
-
init(select_lex, new (thd->mem_root) select_exists_subselect(thd, this));
+ select_lex->distinct= 1;
+ select_lex->master_unit()->distinct= 1;
max_columns= UINT_MAX;
null_value= FALSE; //can't be NULL
base_flags&= ~item_base_t::MAYBE_NULL; //can't be NULL
@@ -1640,6 +1642,39 @@ Item_in_subselect::Item_in_subselect(THD *thd, Item * left_exp,
Item_row(thd, static_cast<Item_row*>(left_exp));
func= &eq_creator;
init(select_lex, new (thd->mem_root) select_exists_subselect(thd, this));
+ select_lex->distinct= 1;
+
+ /*
+    If the IN subquery (xxx IN (SELECT ...)) is a join without grouping,
+ we don't need duplicates from the tables it is joining. These
+ tables can be derived tables, like shown in the following
+ example. In this case, it's useful to indicate that we don't need
+ duplicates from them either.
+
+ Example:
+ col IN (SELECT ... -- this is the select_lex
+ FROM
+ (SELECT ... FROM t1) AS t1, -- child1, first_inner_init().
+ (SELECT ... FROM t2) AS t2, -- child2
+ WHERE
+ ...
+ )
+
+ We don't need duplicates from either child1 or child2.
+ We only indicate this to child1 (select_lex->first_inner_unit()), as that
+    catches most of the practically important use cases.
+
+ (The check for item==NULL is to make sure the subquery is a derived table
+    and not any other kind of subquery like another IN (SELECT ...) or a
+    scalar-context subquery (SELECT 'foo'))
+ */
+
+ select_lex->master_unit()->distinct= 1;
+ if (!select_lex->with_sum_func &&
+ select_lex->first_inner_unit() &&
+ select_lex->first_inner_unit()->item == NULL)
+ select_lex->first_inner_unit()->distinct= 1;
+
max_columns= UINT_MAX;
set_maybe_null();
reset();
@@ -1667,6 +1702,16 @@ Item_allany_subselect::Item_allany_subselect(THD *thd, Item * left_exp,
Item_row(thd, static_cast<Item_row*>(left_exp));
func= func_creator(all_arg);
init(select_lex, new (thd->mem_root) select_exists_subselect(thd, this));
+ select_lex->distinct= 1;
+ /*
+    If this is 'xxx IN (SELECT ...)', mark that we are only interested in
+    unique values for the select
+ */
+ select_lex->master_unit()->distinct= 1;
+ if (!select_lex->with_sum_func &&
+ select_lex->first_inner_unit() &&
+ select_lex->first_inner_unit()->item == NULL)
+ select_lex->first_inner_unit()->distinct= 1;
max_columns= 1;
reset();
//if test_limit will fail then error will be reported to client
@@ -3300,6 +3345,14 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg)
set possible optimization strategies
*/
in_subs->emb_on_expr_nest= emb_on_expr_nest;
+
+ {
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
+ in_subs->get_select_lex()->select_number,
+ "EXISTS (SELECT)", "IN (SELECT)");
+    trace_transform.add("upper_not", (upper_not ? true : false));
+ }
+
res= check_and_do_in_subquery_rewrites(join);
first_select->join->prepare_stage2();
@@ -4009,6 +4062,7 @@ int subselect_single_select_engine::exec()
char const *save_where= thd->where;
SELECT_LEX *save_select= thd->lex->current_select;
thd->lex->current_select= select_lex;
+ bool exec_error= 0;
DBUG_ENTER("subselect_single_select_engine::exec");
if (join->optimization_state == JOIN::NOT_OPTIMIZED)
@@ -4100,7 +4154,7 @@ int subselect_single_select_engine::exec()
}
}
- join->exec();
+ exec_error= join->exec();
/* Enable the optimizations back */
for (JOIN_TAB **ptab= changed_tabs; ptab != last_changed_tab; ptab++)
@@ -4118,7 +4172,7 @@ int subselect_single_select_engine::exec()
item->make_const();
thd->where= save_where;
thd->lex->current_select= save_select;
- DBUG_RETURN(join->error || thd->is_fatal_error || thd->is_error());
+ DBUG_RETURN(exec_error || thd->is_error());
}
thd->where= save_where;
thd->lex->current_select= save_select;
@@ -5665,9 +5719,8 @@ int subselect_hash_sj_engine::exec()
/* The subquery should be optimized, and materialized only once. */
DBUG_ASSERT(materialize_join->optimization_state == JOIN::OPTIMIZATION_DONE &&
!is_materialized);
- materialize_join->exec();
- if (unlikely((res= MY_TEST(materialize_join->error || thd->is_fatal_error ||
- thd->is_error()))))
+ res= materialize_join->exec();
+ if (unlikely((res= (res || thd->is_error()))))
goto err;
/*
diff --git a/sql/json_table.cc b/sql/json_table.cc
index 4f3cfb6b090..ded221269ad 100644
--- a/sql/json_table.cc
+++ b/sql/json_table.cc
@@ -54,6 +54,7 @@ public:
bzero(&m_hton, sizeof(m_hton));
m_hton.tablefile_extensions= hton_no_exts;
m_hton.slot= HA_SLOT_UNDEF;
+ m_hton.flags= HTON_HIDDEN;
}
};
@@ -245,6 +246,10 @@ public:
int open(const char *name, int mode, uint test_if_locked) override
{ return 0; }
int close(void) override { return 0; }
+ void update_optimizer_costs(OPTIMIZER_COSTS *costs) override
+ {
+ memcpy(costs, &heap_optimizer_costs, sizeof(*costs));
+ }
int rnd_init(bool scan) override;
int rnd_next(uchar *buf) override;
int rnd_pos(uchar * buf, uchar *pos) override;
diff --git a/sql/keycaches.cc b/sql/keycaches.cc
index 10bec7c1de8..250a287e229 100644
--- a/sql/keycaches.cc
+++ b/sql/keycaches.cc
@@ -15,6 +15,10 @@
#include "mariadb.h"
#include "keycaches.h"
+#include "optimizer_costs.h"
+#include "optimizer_defaults.h"
+#include "handler.h"
+#include "sql_class.h"
/****************************************************************************
Named list handling
@@ -22,10 +26,13 @@
NAMED_ILIST key_caches;
NAMED_ILIST rpl_filters;
+NAMED_ILIST linked_optimizer_costs;
extern "C" PSI_memory_key key_memory_KEY_CACHE;
extern PSI_memory_key key_memory_NAMED_ILINK_name;
+LEX_CSTRING default_base= {STRING_WITH_LEN("default")};
+
/**
ilink (intrusive list element) with a name
*/
@@ -46,7 +53,7 @@ public:
}
inline bool cmp(const char *name_cmp, size_t length)
{
- return length == name_length && !memcmp(name, name_cmp, length);
+ return !system_charset_info->strnncoll(name, name_length, name_cmp, length);
}
~NAMED_ILINK()
{
@@ -72,7 +79,8 @@ uchar* find_named(I_List<NAMED_ILINK> *list, const char *name, size_t length,
}
-bool NAMED_ILIST::delete_element(const char *name, size_t length, void (*free_element)(const char *name, void*))
+bool NAMED_ILIST::delete_element(const char *name, size_t length,
+ void (*free_element)(const char *name, void*))
{
I_List_iterator<NAMED_ILINK> it(*this);
NAMED_ILINK *element;
@@ -104,14 +112,12 @@ void NAMED_ILIST::delete_elements(void (*free_element)(const char *name, void*))
/* Key cache functions */
-LEX_CSTRING default_key_cache_base= {STRING_WITH_LEN("default")};
-
KEY_CACHE zero_key_cache; ///< @@nonexistent_cache.param->value_ptr() points here
KEY_CACHE *get_key_cache(const LEX_CSTRING *cache_name)
{
if (!cache_name || ! cache_name->length)
- cache_name= &default_key_cache_base;
+ cache_name= &default_base;
return ((KEY_CACHE*) find_named(&key_caches,
cache_name->str, cache_name->length, 0));
}
@@ -234,3 +240,128 @@ void free_all_rpl_filters()
{
rpl_filters.delete_elements(free_rpl_filter);
}
+
+
+/******************************************************************************
+ Optimizer costs functions
+******************************************************************************/
+
+LEX_CSTRING default_costs_base= {STRING_WITH_LEN("default")};
+
+OPTIMIZER_COSTS default_optimizer_costs=
+{
+ DEFAULT_DISK_READ_COST, // disk_read_cost
+ DEFAULT_INDEX_BLOCK_COPY_COST, // index_block_copy_cost
+ DEFAULT_WHERE_COST/4, // key_cmp_cost
+ DEFAULT_KEY_COPY_COST, // key_copy_cost
+ DEFAULT_KEY_LOOKUP_COST, // key_lookup_cost
+ DEFAULT_KEY_NEXT_FIND_COST, // key_next_find_cost
+ DEFAULT_DISK_READ_RATIO, // disk_read_ratio
+ DEFAULT_ROW_COPY_COST, // row_copy_cost
+ DEFAULT_ROW_LOOKUP_COST, // row_lookup_cost
+ DEFAULT_ROW_NEXT_FIND_COST, // row_next_find_cost
+ DEFAULT_ROWID_COMPARE_COST, // rowid_compare_cost
+ DEFAULT_ROWID_COPY_COST, // rowid_copy_cost
+ 1 // Cannot be deleted
+};
+
+OPTIMIZER_COSTS heap_optimizer_costs, tmp_table_optimizer_costs;
+
+OPTIMIZER_COSTS *get_optimizer_costs(const LEX_CSTRING *cache_name)
+{
+ if (!cache_name->length)
+ return &default_optimizer_costs;
+ return ((OPTIMIZER_COSTS*) find_named(&linked_optimizer_costs,
+ cache_name->str, cache_name->length,
+ 0));
+}
+
+OPTIMIZER_COSTS *create_optimizer_costs(const char *name, size_t length)
+{
+ OPTIMIZER_COSTS *optimizer_costs;
+ DBUG_ENTER("create_optimizer_costs");
+ DBUG_PRINT("enter",("name: %.*s", (int) length, name));
+
+ if ((optimizer_costs= (OPTIMIZER_COSTS*)
+ my_malloc(key_memory_KEY_CACHE,
+ sizeof(OPTIMIZER_COSTS), MYF(MY_ZEROFILL | MY_WME))))
+ {
+ if (!new NAMED_ILINK(&linked_optimizer_costs, name, length,
+ (uchar*) optimizer_costs))
+ {
+ my_free(optimizer_costs);
+ optimizer_costs= 0;
+ }
+ else
+ {
+ /* Mark that values are not yet set */
+ for (uint i=0 ; i < sizeof(OPTIMIZER_COSTS)/sizeof(double) ; i++)
+ ((double*) optimizer_costs)[i]= OPTIMIZER_COST_UNDEF;
+ }
+ }
+ DBUG_RETURN(optimizer_costs);
+}
+
+
+OPTIMIZER_COSTS *get_or_create_optimizer_costs(const char *name, size_t length)
+{
+ LEX_CSTRING optimizer_costs_name;
+ OPTIMIZER_COSTS *optimizer_costs;
+
+ optimizer_costs_name.str= name;
+ optimizer_costs_name.length= length;
+ if (!(optimizer_costs= get_optimizer_costs(&optimizer_costs_name)))
+ optimizer_costs= create_optimizer_costs(name, length);
+ return optimizer_costs;
+}
+
+extern "C"
+{
+bool process_optimizer_costs(process_optimizer_costs_t func, TABLE *param)
+{
+ I_List_iterator<NAMED_ILINK> it(linked_optimizer_costs);
+ NAMED_ILINK *element;
+ int res= 0;
+
+ while ((element= it++))
+ {
+ LEX_CSTRING name= { element->name, element->name_length };
+ OPTIMIZER_COSTS *costs= (OPTIMIZER_COSTS *) element->data;
+ res |= func(&name, costs, param);
+ }
+ return res != 0;
+}
+}
+
+bool create_default_optimizer_costs()
+{
+ return (new NAMED_ILINK(&linked_optimizer_costs,
+ default_base.str, default_base.length,
+ (uchar*) &default_optimizer_costs)) == 0;
+}
+
+
+/*
+ Make a copy of heap and tmp_table engine costs to be able to create
+ internal temporary tables without taking a mutex.
+*/
+
+void copy_tmptable_optimizer_costs()
+{
+ memcpy(&heap_optimizer_costs, heap_hton->optimizer_costs,
+ sizeof(heap_optimizer_costs));
+ memcpy(&tmp_table_optimizer_costs, TMP_ENGINE_HTON->optimizer_costs,
+ sizeof(tmp_table_optimizer_costs));
+}
+
+
+static void free_optimizer_costs(const char *name, void *cost)
+{
+ if ((OPTIMIZER_COSTS*) cost != &default_optimizer_costs)
+ my_free(cost);
+}
+
+void free_all_optimizer_costs()
+{
+ linked_optimizer_costs.delete_elements(free_optimizer_costs);
+}
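The keycaches.cc additions reuse NAMED_ILIST as a name-keyed registry of OPTIMIZER_COSTS: look up by (case-insensitive) name, create a sentinel-filled entry on a miss, and keep one static "default" entry that is never freed. A rough standard-library equivalent, for illustration only (the real code uses NAMED_ILIST, LOCK_optimizer_costs and collation-aware comparison):

/* Illustrative only: get-or-create registry keyed by normalized name. */
#include <algorithm>
#include <cctype>
#include <map>
#include <string>

struct Costs { double read_cost, copy_cost; };
static const double COST_UNDEF= -1.0;            // assumed sentinel
static Costs default_costs= {10.24, 0.06};       // assumed defaults
static std::map<std::string, Costs> registry= {{"default", default_costs}};

static std::string normalize(std::string s)
{
  std::transform(s.begin(), s.end(), s.begin(),
                 [](unsigned char c) { return std::tolower(c); });
  return s;
}

Costs *get_or_create_costs(const std::string &name)
{
  std::string key= normalize(name.empty() ? "default" : name);
  auto it= registry.find(key);
  if (it != registry.end())
    return &it->second;
  Costs unset= {COST_UNDEF, COST_UNDEF};          // values filled in later
  return &registry.emplace(key, unset).first->second;
}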
diff --git a/sql/keycaches.h b/sql/keycaches.h
index 68c3dd3a2b0..721251b6745 100644
--- a/sql/keycaches.h
+++ b/sql/keycaches.h
@@ -35,7 +35,7 @@ class NAMED_ILIST: public I_List<NAMED_ILINK>
};
/* For key cache */
-extern LEX_CSTRING default_key_cache_base;
+extern LEX_CSTRING default_base;
extern KEY_CACHE zero_key_cache;
extern NAMED_ILIST key_caches;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 5e255646528..cabbfc472be 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -728,40 +728,7 @@ Log_event::Log_event(const uchar *buf,
when_sec_part= ~0UL;
server_id= uint4korr(buf + SERVER_ID_OFFSET);
data_written= uint4korr(buf + EVENT_LEN_OFFSET);
- if (description_event->binlog_version==1)
- {
- log_pos= 0;
- flags= 0;
- return;
- }
- /* 4.0 or newer */
log_pos= uint4korr(buf + LOG_POS_OFFSET);
- /*
- If the log is 4.0 (so here it can only be a 4.0 relay log read by
- the SQL thread or a 4.0 master binlog read by the I/O thread),
- log_pos is the beginning of the event: we transform it into the end
- of the event, which is more useful.
- But how do you know that the log is 4.0: you know it if
- description_event is version 3 *and* you are not reading a
- Format_desc (remember that mysqlbinlog starts by assuming that 5.0
- logs are in 4.0 format, until it finds a Format_desc).
- */
- if (description_event->binlog_version==3 &&
- (uchar)buf[EVENT_TYPE_OFFSET]<FORMAT_DESCRIPTION_EVENT && log_pos)
- {
- /*
- If log_pos=0, don't change it. log_pos==0 is a marker to mean
- "don't change rli->group_master_log_pos" (see
- inc_group_relay_log_pos()). As it is unreal log_pos, adding the
- event len's is nonsense. For example, a fake Rotate event should
- not have its log_pos (which is 0) changed or it will modify
- Exec_master_log_pos in SHOW SLAVE STATUS, displaying a nonsense
- value of (a non-zero offset which does not exist in the master's
- binlog, so which will cause problems if the user uses this value
- in CHANGE MASTER).
- */
- log_pos+= data_written; /* purecov: inspected */
- }
DBUG_PRINT("info", ("log_pos: %llu", log_pos));
flags= uint2korr(buf + FLAGS_OFFSET);
@@ -966,7 +933,7 @@ err:
if (force_opt)
DBUG_RETURN(new Unknown_log_event());
#endif
- if (event.length() >= OLD_HEADER_LEN)
+ if (event.length() >= LOG_EVENT_MINIMAL_HEADER_LEN)
sql_print_error("Error in Log_event::read_log_event(): '%s',"
" data_len: %lu, event_type: %u", error,
(ulong) uint4korr(&event[EVENT_LEN_OFFSET]),
@@ -1128,12 +1095,6 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
ev= new Query_compressed_log_event(buf, event_len, fdle,
QUERY_COMPRESSED_EVENT);
break;
- case LOAD_EVENT:
- ev= new Load_log_event(buf, event_len, fdle);
- break;
- case NEW_LOAD_EVENT:
- ev= new Load_log_event(buf, event_len, fdle);
- break;
case ROTATE_EVENT:
ev= new Rotate_log_event(buf, event_len, fdle);
break;
@@ -1146,21 +1107,12 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
case GTID_LIST_EVENT:
ev= new Gtid_list_log_event(buf, event_len, fdle);
break;
- case CREATE_FILE_EVENT:
- ev= new Create_file_log_event(buf, event_len, fdle);
- break;
case APPEND_BLOCK_EVENT:
ev= new Append_block_log_event(buf, event_len, fdle);
break;
case DELETE_FILE_EVENT:
ev= new Delete_file_log_event(buf, event_len, fdle);
break;
- case EXEC_LOAD_EVENT:
- ev= new Execute_load_log_event(buf, event_len, fdle);
- break;
- case START_EVENT_V3: /* this is sent only by MySQL <=4.x */
- ev= new Start_log_event_v3(buf, event_len, fdle);
- break;
case STOP_EVENT:
ev= new Stop_log_event(buf, fdle);
break;
@@ -1183,15 +1135,6 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
ev= new Format_description_log_event(buf, event_len, fdle);
break;
#if defined(HAVE_REPLICATION)
- case PRE_GA_WRITE_ROWS_EVENT:
- ev= new Write_rows_log_event_old(buf, event_len, fdle);
- break;
- case PRE_GA_UPDATE_ROWS_EVENT:
- ev= new Update_rows_log_event_old(buf, event_len, fdle);
- break;
- case PRE_GA_DELETE_ROWS_EVENT:
- ev= new Delete_rows_log_event_old(buf, event_len, fdle);
- break;
case WRITE_ROWS_EVENT_V1:
case WRITE_ROWS_EVENT:
ev= new Write_rows_log_event(buf, event_len, fdle);
@@ -1247,6 +1190,14 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
case START_ENCRYPTION_EVENT:
ev= new Start_encryption_log_event(buf, event_len, fdle);
break;
+ case PRE_GA_WRITE_ROWS_EVENT:
+ case PRE_GA_UPDATE_ROWS_EVENT:
+ case PRE_GA_DELETE_ROWS_EVENT:
+ case START_EVENT_V3: /* this is sent only by MySQL <=4.x */
+ case CREATE_FILE_EVENT:
+ case EXEC_LOAD_EVENT:
+ case LOAD_EVENT:
+ case NEW_LOAD_EVENT:
default:
DBUG_PRINT("error",("Unknown event code: %d",
(uchar) buf[EVENT_TYPE_OFFSET]));
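
Grouping the retired 3.23/4.x type codes in front of the default label keeps them visible by name while routing them to the same unknown-event path as genuinely unknown codes. A stripped-down sketch of that factory pattern follows; the enum values and class names are placeholders, not the server's.

#include <memory>
#include <cstdio>

// Illustrative event codes and classes only.
enum Ev_type { QUERY_EV= 2, ROTATE_EV= 4, LEGACY_LOAD_EV= 6, LEGACY_CREATE_FILE_EV= 8 };

struct Event         { virtual ~Event() = default; virtual const char *name() const { return "event"; } };
struct Query_event   : Event { const char *name() const override { return "Query"; } };
struct Rotate_event  : Event { const char *name() const override { return "Rotate"; } };
struct Unknown_event : Event { const char *name() const override { return "Unknown"; } };

static std::unique_ptr<Event> make_event(int code)
{
  switch (code) {
  case QUERY_EV:  return std::make_unique<Query_event>();
  case ROTATE_EV: return std::make_unique<Rotate_event>();
  case LEGACY_LOAD_EV:          // retired codes: still recognised by name,
  case LEGACY_CREATE_FILE_EV:   // but fall through to the default branch
  default:        return std::make_unique<Unknown_event>();
  }
}

int main()
{
  std::printf("%s\n", make_event(LEGACY_LOAD_EV)->name());  // prints "Unknown"
}

Dropping the case labels entirely would behave the same at runtime; listing them documents that the codes are reserved and intentionally unsupported.
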
@@ -1427,11 +1378,10 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
flags2_inited(0), sql_mode_inited(0), charset_inited(0), flags2(0),
auto_increment_increment(1), auto_increment_offset(1),
time_zone_len(0), lc_time_names_number(0), charset_database_number(0),
- table_map_for_update(0), xid(0), master_data_written(0), gtid_flags_extra(0),
+ table_map_for_update(0), xid(0), gtid_flags_extra(0),
sa_seq_no(0)
{
ulong data_len;
- uint32 tmp;
uint8 common_header_len, post_header_len;
Log_event::Byte *start;
const Log_event::Byte *end;
@@ -1460,45 +1410,23 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
db_len = (uchar)buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars
error_code = uint2korr(buf + Q_ERR_CODE_OFFSET);
+ status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
/*
- 5.0 format starts here.
- Depending on the format, we may or not have affected/warnings etc
- The remnant post-header to be parsed has length:
+ Check if status variable length is corrupt and will lead to very
+ wrong data. We could be even more strict and require data_len to
+ be even bigger, but this will suffice to catch most corruption
+ errors that can lead to a crash.
*/
- tmp= post_header_len - QUERY_HEADER_MINIMAL_LEN;
- if (tmp)
- {
- status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
- /*
- Check if status variable length is corrupt and will lead to very
- wrong data. We could be even more strict and require data_len to
- be even bigger, but this will suffice to catch most corruption
- errors that can lead to a crash.
- */
- if (status_vars_len > MY_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS))
- {
- DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
- status_vars_len, data_len));
- query= 0;
- DBUG_VOID_RETURN;
- }
- data_len-= status_vars_len;
- DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
- (uint) status_vars_len));
- tmp-= 2;
- }
- else
+ if (status_vars_len > MY_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS))
{
- /*
- server version < 5.0 / binlog_version < 4 master's event is
- relay-logged with storing the original size of the event in
- Q_MASTER_DATA_WRITTEN_CODE status variable.
- The size is to be restored at reading Q_MASTER_DATA_WRITTEN_CODE-marked
- event from the relay log.
- */
- DBUG_ASSERT(description_event->binlog_version < 4);
- master_data_written= (uint32)data_written;
+ DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
+ status_vars_len, data_len));
+ query= 0;
+ DBUG_VOID_RETURN;
}
+ data_len-= status_vars_len;
+ DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
+ (uint) status_vars_len));
/*
We have parsed everything we know in the post header for QUERY_EVENT,
the rest of post header is either comes from older version MySQL or
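
Since only binlog_version 4 events remain, status_vars_len is now read unconditionally and validated before it is subtracted from data_len. A minimal sketch of that guard, with a placeholder limit standing in for MAX_SIZE_LOG_EVENT_STATUS and illustrative names:

#include <cstdint>
#include <cstdio>
#include <algorithm>

// A length field read from the event must not exceed what is actually left in
// the buffer, nor a hard upper bound; otherwise the event is rejected instead
// of being parsed.
static const uint32_t MAX_STATUS= 1024;   // placeholder for MAX_SIZE_LOG_EVENT_STATUS

static bool consume_status_block(uint32_t status_vars_len, uint32_t &data_len)
{
  if (status_vars_len > std::min(data_len, MAX_STATUS))
    return false;               // corrupt length: caller marks the event invalid
  data_len-= status_vars_len;   // safe: cannot underflow after the check
  return true;
}

int main()
{
  uint32_t remaining= 100;
  std::printf("%d\n", consume_status_block(40, remaining));    // 1, remaining == 60
  std::printf("%d\n", consume_status_block(5000, remaining));  // 0, rejected
}

Checking against both the remaining length and a fixed ceiling catches truncated events as well as absurd lengths produced by corruption.
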
@@ -1585,9 +1513,9 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
table_map_for_update= uint8korr(pos);
pos+= 8;
break;
- case Q_MASTER_DATA_WRITTEN_CODE:
+ case Q_MASTER_DATA_WRITTEN_CODE: // impossible
CHECK_SPACE(pos, end, 4);
- data_written= master_data_written= uint4korr(pos);
+ data_written= uint4korr(pos);
pos+= 4;
break;
case Q_INVOKER:
@@ -1991,32 +1919,6 @@ Query_log_event::begin_event(String *packet, ulong ev_offset,
}
-/**************************************************************************
- Start_log_event_v3 methods
-**************************************************************************/
-
-
-Start_log_event_v3::Start_log_event_v3(const uchar *buf, uint event_len,
- const Format_description_log_event
- *description_event)
- :Log_event(buf, description_event), binlog_version(BINLOG_VERSION)
-{
- if (event_len < LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET)
- {
- server_version[0]= 0;
- return;
- }
- buf+= LOG_EVENT_MINIMAL_HEADER_LEN;
- binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET);
- memcpy(server_version, buf+ST_SERVER_VER_OFFSET,
- ST_SERVER_VER_LEN);
- // prevent overrun if log is corrupted on disk
- server_version[ST_SERVER_VER_LEN-1]= 0;
- created= uint4korr(buf+ST_CREATED_OFFSET);
- dont_set_created= 1;
-}
-
-
/***************************************************************************
Format_description_log_event methods
****************************************************************************/
@@ -2040,10 +1942,10 @@ Start_log_event_v3::Start_log_event_v3(const uchar *buf, uint event_len,
Format_description_log_event::
Format_description_log_event(uint8 binlog_ver, const char* server_ver)
- :Start_log_event_v3(), event_type_permutation(0)
+ :Log_event(), created(0), binlog_version(binlog_ver),
+ dont_set_created(0), event_type_permutation(0)
{
- binlog_version= binlog_ver;
- switch (binlog_ver) {
+ switch (binlog_version) {
case 4: /* MySQL 5.0 */
memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
DBUG_EXECUTE_IF("pretend_version_50034_in_binlog",
@@ -2161,44 +2063,6 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
case 1: /* 3.23 */
case 3: /* 4.0.x x>=2 */
- /*
- We build an artificial (i.e. not sent by the master) event, which
- describes what those old master versions send.
- */
- if (binlog_ver==1)
- strmov(server_version, server_ver ? server_ver : "3.23");
- else
- strmov(server_version, server_ver ? server_ver : "4.0");
- common_header_len= binlog_ver==1 ? OLD_HEADER_LEN :
- LOG_EVENT_MINIMAL_HEADER_LEN;
- /*
- The first new event in binlog version 4 is Format_desc. So any event type
- after that does not exist in older versions. We use the events known by
- version 3, even if version 1 had only a subset of them (this is not a
- problem: it uses a few bytes for nothing but unifies code; it does not
- make the slave detect less corruptions).
- */
- number_of_event_types= FORMAT_DESCRIPTION_EVENT - 1;
- post_header_len=(uint8*) my_malloc(PSI_INSTRUMENT_ME,
- number_of_event_types*sizeof(uint8), MYF(0));
- if (post_header_len)
- {
- post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN;
- post_header_len[QUERY_EVENT-1]= QUERY_HEADER_MINIMAL_LEN;
- post_header_len[STOP_EVENT-1]= 0;
- post_header_len[ROTATE_EVENT-1]= (binlog_ver==1) ? 0 : ROTATE_HEADER_LEN;
- post_header_len[INTVAR_EVENT-1]= 0;
- post_header_len[LOAD_EVENT-1]= LOAD_HEADER_LEN;
- post_header_len[SLAVE_EVENT-1]= 0;
- post_header_len[CREATE_FILE_EVENT-1]= CREATE_FILE_HEADER_LEN;
- post_header_len[APPEND_BLOCK_EVENT-1]= APPEND_BLOCK_HEADER_LEN;
- post_header_len[EXEC_LOAD_EVENT-1]= EXEC_LOAD_HEADER_LEN;
- post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN;
- post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1];
- post_header_len[RAND_EVENT-1]= 0;
- post_header_len[USER_VAR_EVENT-1]= 0;
- }
- break;
default: /* Includes binlog version 2 i.e. 4.0.x x<=1 */
post_header_len= 0; /* will make is_valid() fail */
break;
@@ -2232,14 +2096,26 @@ Format_description_log_event::
Format_description_log_event(const uchar *buf, uint event_len,
const Format_description_log_event*
description_event)
- :Start_log_event_v3(buf, event_len, description_event),
+ :Log_event(buf, description_event), binlog_version(BINLOG_VERSION),
common_header_len(0), post_header_len(NULL), event_type_permutation(0)
{
DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)");
- if (!Start_log_event_v3::is_valid())
- DBUG_VOID_RETURN; /* sanity check */
+ if (event_len < LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET)
+ {
+ server_version[0]= 0;
+ DBUG_VOID_RETURN;
+ }
buf+= LOG_EVENT_MINIMAL_HEADER_LEN;
- if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < OLD_HEADER_LEN)
+ binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET);
+ memcpy(server_version, buf+ST_SERVER_VER_OFFSET, ST_SERVER_VER_LEN);
+ // prevent overrun if log is corrupted on disk
+ server_version[ST_SERVER_VER_LEN-1]= 0;
+ created= uint4korr(buf+ST_CREATED_OFFSET);
+ dont_set_created= 1;
+
+ if (server_version[0] == 0)
+ DBUG_VOID_RETURN; /* sanity check */
+ if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < LOG_EVENT_MINIMAL_HEADER_LEN)
DBUG_VOID_RETURN; /* sanity check */
number_of_event_types=
event_len - (LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET + 1);
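
With Start_log_event_v3 folded in, this constructor performs the whole Format_description parse itself. Below is a self-contained sketch of that parse, assuming the conventional ST_* layout (2-byte binlog version, 50-byte NUL-padded server version, 4-byte created timestamp, 1-byte common header length) after the 19-byte common header; struct and function names are illustrative.

#include <cstdint>
#include <cstring>
#include <cstddef>

static uint16_t le16(const unsigned char *p) { return (uint16_t)(p[0] | (p[1] << 8)); }
static uint32_t le32(const unsigned char *p)
{ return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t) p[3] << 24); }

struct Format_desc
{
  uint16_t binlog_version;
  char server_version[50];
  uint32_t created;
  uint8_t common_header_len;
};

static bool parse_format_desc(const unsigned char *buf, size_t event_len, Format_desc *out)
{
  const size_t COMMON= 19, VER= 2, SRV= 50, CREATED= 4;
  if (event_len < COMMON + VER + SRV + CREATED + 1)
    return false;                               // too short to be a valid FDE
  buf+= COMMON;
  out->binlog_version= le16(buf);
  std::memcpy(out->server_version, buf + VER, SRV);
  out->server_version[SRV - 1]= 0;              // guard against a corrupted log
  out->created= le32(buf + VER + SRV);
  out->common_header_len= buf[VER + SRV + CREATED];
  return out->common_header_len >= COMMON;      // same sanity check as above
}

The bytes that follow, one per known event type, are the post-header lengths plus the checksum algorithm byte; number_of_event_types above is derived the same way, from event_len minus this fixed prefix.
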
@@ -2427,120 +2303,6 @@ Start_encryption_log_event(const uchar *buf, uint event_len,
}
-/**************************************************************************
- Load_log_event methods
- General note about Load_log_event: the binlogging of LOAD DATA INFILE is
- going to be changed in 5.0 (or maybe in 5.1; not decided yet).
- However, the 5.0 slave could still have to read such events (from a 4.x
- master), convert them (which just means maybe expand the header, when 5.0
- servers have a UID in events) (remember that whatever is after the header
- will be like in 4.x, as this event's format is not modified in 5.0 as we
- will use new types of events to log the new LOAD DATA INFILE features).
- To be able to read/convert, we just need to not assume that the common
- header is of length LOG_EVENT_HEADER_LEN (we must use the description
- event).
- Note that I (Guilhem) manually tested replication of a big LOAD DATA INFILE
- between 3.23 and 5.0, and between 4.0 and 5.0, and it works fine (and the
- positions displayed in SHOW SLAVE STATUS then are fine too).
-**************************************************************************/
-
-
-/**
- @note
- The caller must do buf[event_len]= 0 before he starts using the
- constructed event.
-*/
-
-Load_log_event::Load_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event
- *description_event)
- :Log_event(buf, description_event), num_fields(0), fields(0),
- field_lens(0),field_block_len(0),
- table_name(0), db(0), fname(0), local_fname(FALSE),
- /*
- Load_log_event which comes from the binary log does not contain
- information about the type of insert which was used on the master.
- Assume that it was an ordinary, non-concurrent LOAD DATA.
- */
- is_concurrent(FALSE)
-{
- DBUG_ENTER("Load_log_event");
- /*
- I (Guilhem) manually tested replication of LOAD DATA INFILE for 3.23->5.0,
- 4.0->5.0 and 5.0->5.0 and it works.
- */
- if (event_len)
- copy_log_event(buf, event_len,
- (((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ?
- LOAD_HEADER_LEN +
- description_event->common_header_len :
- LOAD_HEADER_LEN + LOG_EVENT_HEADER_LEN),
- description_event);
- /* otherwise it's a derived class, will call copy_log_event() itself */
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Load_log_event::copy_log_event()
-*/
-
-int Load_log_event::copy_log_event(const uchar *buf, ulong event_len,
- int body_offset,
- const Format_description_log_event
- *description_event)
-{
- DBUG_ENTER("Load_log_event::copy_log_event");
- uint data_len;
- if ((int) event_len <= body_offset)
- DBUG_RETURN(1);
- const uchar *buf_end= buf + event_len;
- /* this is the beginning of the post-header */
- const uchar *data_head= buf + description_event->common_header_len;
- thread_id= slave_proxy_id= uint4korr(data_head + L_THREAD_ID_OFFSET);
- exec_time= uint4korr(data_head + L_EXEC_TIME_OFFSET);
- skip_lines= uint4korr(data_head + L_SKIP_LINES_OFFSET);
- table_name_len= (uint)data_head[L_TBL_LEN_OFFSET];
- db_len= (uint)data_head[L_DB_LEN_OFFSET];
- num_fields= uint4korr(data_head + L_NUM_FIELDS_OFFSET);
-
- /*
- Sql_ex.init() on success returns the pointer to the first byte after
- the sql_ex structure, which is the start of field lengths array.
- */
- if (!(field_lens= (uchar*) sql_ex.init(buf + body_offset, buf_end,
- buf[EVENT_TYPE_OFFSET] != LOAD_EVENT)))
- DBUG_RETURN(1);
-
- data_len= event_len - body_offset;
- if (num_fields > data_len) // simple sanity check against corruption
- DBUG_RETURN(1);
- for (uint i= 0; i < num_fields; i++)
- field_block_len+= (uint)field_lens[i] + 1;
-
- fields= (char*) field_lens + num_fields;
- table_name= fields + field_block_len;
- if (strlen(table_name) > NAME_LEN)
- goto err;
-
- db= table_name + table_name_len + 1;
- DBUG_EXECUTE_IF("simulate_invalid_address", db_len= data_len;);
- fname= db + db_len + 1;
- if ((db_len > data_len) || (fname > (char*) buf_end))
- goto err;
- fname_len= (uint) strlen(fname);
- if ((fname_len > data_len) || (fname + fname_len > (char*) buf_end))
- goto err;
- // null termination is accomplished by the caller doing buf[event_len]=0
-
- DBUG_RETURN(0);
-
-err:
- // Invalid event.
- table_name= 0;
- DBUG_RETURN(1);
-}
-
/**************************************************************************
Rotate_log_event methods
@@ -3021,68 +2783,6 @@ err:
/**************************************************************************
- Create_file_log_event methods
-**************************************************************************/
-
-/*
- Create_file_log_event ctor
-*/
-
-Create_file_log_event::
-Create_file_log_event(const uchar *buf, uint len,
- const Format_description_log_event* description_event)
- :Load_log_event(buf,0,description_event),fake_base(0),block(0),
- inited_from_old(0)
-{
- DBUG_ENTER("Create_file_log_event::Create_file_log_event(char*,...)");
- uint block_offset;
- uint header_len= description_event->common_header_len;
- uint8 load_header_len= description_event->post_header_len[LOAD_EVENT-1];
- uint8 create_file_header_len= description_event->post_header_len[CREATE_FILE_EVENT-1];
- if (!(event_buf= (uchar*) my_memdup(PSI_INSTRUMENT_ME, buf, len,
- MYF(MY_WME))) ||
- copy_log_event(event_buf,len,
- (((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ?
- load_header_len + header_len :
- (fake_base ? (header_len+load_header_len) :
- (header_len+load_header_len) +
- create_file_header_len)),
- description_event))
- DBUG_VOID_RETURN;
- if (description_event->binlog_version!=1)
- {
- file_id= uint4korr(buf +
- header_len +
- load_header_len + CF_FILE_ID_OFFSET);
- /*
- Note that it's ok to use get_data_size() below, because it is computed
- with values we have already read from this event (because we called
- copy_log_event()); we are not using slave's format info to decode
- master's format, we are really using master's format info.
- Anyway, both formats should be identical (except the common_header_len)
- as these Load events are not changed between 4.0 and 5.0 (as logging of
- LOAD DATA INFILE does not use Load_log_event in 5.0).
-
- The + 1 is for \0 terminating fname
- */
- block_offset= (description_event->common_header_len +
- Load_log_event::get_data_size() +
- create_file_header_len + 1);
- if (len < block_offset)
- DBUG_VOID_RETURN;
- block= const_cast<uchar*>(buf) + block_offset;
- block_len= len - block_offset;
- }
- else
- {
- sql_ex.force_new_format();
- inited_from_old= 1;
- }
- DBUG_VOID_RETURN;
-}
-
-
-/**************************************************************************
Append_block_log_event methods
**************************************************************************/
@@ -3131,27 +2831,6 @@ Delete_file_log_event(const uchar *buf, uint len,
/**************************************************************************
- Execute_load_log_event methods
-**************************************************************************/
-
-/*
- Execute_load_log_event ctor
-*/
-
-Execute_load_log_event::
-Execute_load_log_event(const uchar *buf, uint len,
- const Format_description_log_event* description_event)
- :Log_event(buf, description_event), file_id(0)
-{
- uint8 common_header_len= description_event->common_header_len;
- uint8 exec_load_header_len= description_event->post_header_len[EXEC_LOAD_EVENT-1];
- if (len < (uint)(common_header_len+exec_load_header_len))
- return;
- file_id= uint4korr(buf + common_header_len + EL_FILE_ID_OFFSET);
-}
-
-
-/**************************************************************************
Begin_load_query_log_event methods
**************************************************************************/
diff --git a/sql/log_event.h b/sql/log_event.h
index 0b1503a5b03..6b8853493be 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -169,21 +169,17 @@ class String;
See the #defines below for the format specifics.
The events which really update data are Query_log_event,
- Execute_load_query_log_event and old Load_log_event and
- Execute_load_log_event events (Execute_load_query is used together with
- Begin_load_query and Append_block events to replicate LOAD DATA INFILE.
- Create_file/Append_block/Execute_load (which includes Load_log_event)
- were used to replicate LOAD DATA before the 5.0.3).
+ Execute_load_query_log_event and Execute_load_log_event events
+ (Execute_load_query is used together with Begin_load_query and Append_block
+ events to replicate LOAD DATA INFILE.)
****************************************************************************/
#define LOG_EVENT_HEADER_LEN 19 /* the fixed header length */
-#define OLD_HEADER_LEN 13 /* the fixed header length in 3.23 */
/*
- Fixed header length, where 4.x and 5.0 agree. That is, 5.0 may have a longer
- header (it will for sure when we have the unique event's ID), but at least
- the first 19 bytes are the same in 4.x and 5.0. So when we have the unique
- event's ID, LOG_EVENT_HEADER_LEN will be something like 26, but
+ Fixed header length. That is, some future version may have a longer
+ header, but at least the first 19 bytes will be the same. So
+ LOG_EVENT_HEADER_LEN will be something like 26, but
LOG_EVENT_MINIMAL_HEADER_LEN will remain 19.
*/
#define LOG_EVENT_MINIMAL_HEADER_LEN 19
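
Because only the first 19 bytes are guaranteed to stay fixed, a reader should take the actual common-header length from the Format_description event it has already seen rather than hard-coding LOG_EVENT_HEADER_LEN. A tiny sketch (function name illustrative):

#include <cstddef>

// common_header_len comes from the Format_description event; it is at least
// LOG_EVENT_MINIMAL_HEADER_LEN (19) and marks where the post-header starts.
static const unsigned char *post_header_of(const unsigned char *event_buf,
                                           size_t common_header_len)
{
  return event_buf + common_header_len;
}
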
@@ -604,11 +600,6 @@ enum Log_event_type
APPEND_BLOCK_EVENT= 9,
EXEC_LOAD_EVENT= 10,
DELETE_FILE_EVENT= 11,
- /*
- NEW_LOAD_EVENT is like LOAD_EVENT except that it has a longer
- sql_ex, allowing multibyte TERMINATED BY etc; both types share the
- same class (Load_log_event)
- */
NEW_LOAD_EVENT= 12,
RAND_EVENT= 13,
USER_VAR_EVENT= 14,
@@ -2098,10 +2089,9 @@ public:
uint16 error_code;
my_thread_id thread_id;
/*
- For events created by Query_log_event::do_apply_event (and
- Load_log_event::do_apply_event()) we need the *original* thread
- id, to be able to log the event with the original (=master's)
- thread id (fix for BUG#1686).
+ For events created by Query_log_event::do_apply_event we need the
+ *original* thread id, to be able to log the event with the original
+ (=master's) thread id (fix for BUG#1686).
*/
ulong slave_proxy_id;
@@ -2125,12 +2115,6 @@ public:
'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so
its first byte is its length. For now the order of status vars is:
flags2 - sql_mode - catalog - autoinc - charset
- We should add the same thing to Load_log_event, but in fact
- LOAD DATA INFILE is going to be logged with a new type of event (logging of
- the plain text query), so Load_log_event would be frozen, so no need. The
- new way of logging LOAD DATA INFILE would use a derived class of
- Query_log_event, so automatically benefit from the work already done for
- status variables in Query_log_event.
*/
uint16 status_vars_len;
@@ -2163,16 +2147,6 @@ public:
/* Xid for the event, if such exists */
ulonglong xid;
/*
- Holds the original length of a Query_log_event that comes from a
- master of version < 5.0 (i.e., binlog_version < 4). When the IO
- thread writes the relay log, it augments the Query_log_event with a
- Q_MASTER_DATA_WRITTEN_CODE status_var that holds the original event
- length. This field is initialized to non-zero in the SQL thread when
- it reads this augmented event. SQL thread does not write
- Q_MASTER_DATA_WRITTEN_CODE to the slave's server binlog.
- */
- uint32 master_data_written;
- /*
A copy of Gtid event's extra flags that is relevant for two-phase
logged ALTER.
*/
@@ -2331,412 +2305,6 @@ struct sql_ex_info
};
/**
- @class Load_log_event
-
- This log event corresponds to a "LOAD DATA INFILE" SQL query on the
- following form:
-
- @verbatim
- (1) USE db;
- (2) LOAD DATA [CONCURRENT] [LOCAL] INFILE 'file_name'
- (3) [REPLACE | IGNORE]
- (4) INTO TABLE 'table_name'
- (5) [FIELDS
- (6) [TERMINATED BY 'field_term']
- (7) [[OPTIONALLY] ENCLOSED BY 'enclosed']
- (8) [ESCAPED BY 'escaped']
- (9) ]
- (10) [LINES
- (11) [TERMINATED BY 'line_term']
- (12) [LINES STARTING BY 'line_start']
- (13) ]
- (14) [IGNORE skip_lines LINES]
- (15) (field_1, field_2, ..., field_n)@endverbatim
-
- @section Load_log_event_binary_format Binary Format
-
- The Post-Header consists of the following six components.
-
- <table>
- <caption>Post-Header for Load_log_event</caption>
-
- <tr>
- <th>Name</th>
- <th>Format</th>
- <th>Description</th>
- </tr>
-
- <tr>
- <td>slave_proxy_id</td>
- <td>4 byte unsigned integer</td>
- <td>An integer identifying the client thread that issued the
- query. The id is unique per server. (Note, however, that two
- threads on different servers may have the same slave_proxy_id.)
- This is used when a client thread creates a temporary table local
- to the client. The slave_proxy_id is used to distinguish
- temporary tables that belong to different clients.
- </td>
- </tr>
-
- <tr>
- <td>exec_time</td>
- <td>4 byte unsigned integer</td>
- <td>The time from when the query started to when it was logged in
- the binlog, in seconds.</td>
- </tr>
-
- <tr>
- <td>skip_lines</td>
- <td>4 byte unsigned integer</td>
- <td>The number on line (14) above, if present, or 0 if line (14)
- is left out.
- </td>
- </tr>
-
- <tr>
- <td>table_name_len</td>
- <td>1 byte unsigned integer</td>
- <td>The length of 'table_name' on line (4) above.</td>
- </tr>
-
- <tr>
- <td>db_len</td>
- <td>1 byte unsigned integer</td>
- <td>The length of 'db' on line (1) above.</td>
- </tr>
-
- <tr>
- <td>num_fields</td>
- <td>4 byte unsigned integer</td>
- <td>The number n of fields on line (15) above.</td>
- </tr>
- </table>
-
- The Body contains the following components.
-
- <table>
- <caption>Body of Load_log_event</caption>
-
- <tr>
- <th>Name</th>
- <th>Format</th>
- <th>Description</th>
- </tr>
-
- <tr>
- <td>sql_ex</td>
- <td>variable length</td>
-
- <td>Describes the part of the query on lines (3) and
- (5)&ndash;(13) above. More precisely, it stores the five strings
- (on lines) field_term (6), enclosed (7), escaped (8), line_term
- (11), and line_start (12); as well as a bitfield indicating the
- presence of the keywords REPLACE (3), IGNORE (3), and OPTIONALLY
- (7).
-
- The data is stored in one of two formats, called "old" and "new".
- The type field of Common-Header determines which of these two
- formats is used: type LOAD_EVENT means that the old format is
- used, and type NEW_LOAD_EVENT means that the new format is used.
- When MySQL writes a Load_log_event, it uses the new format if at
- least one of the five strings is two or more bytes long.
- Otherwise (i.e., if all strings are 0 or 1 bytes long), the old
- format is used.
-
- The new and old format differ in the way the five strings are
- stored.
-
- <ul>
- <li> In the new format, the strings are stored in the order
- field_term, enclosed, escaped, line_term, line_start. Each string
- consists of a length (1 byte), followed by a sequence of
- characters (0-255 bytes). Finally, a boolean combination of the
- following flags is stored in 1 byte: REPLACE_FLAG==0x4,
- IGNORE_FLAG==0x8, and OPT_ENCLOSED_FLAG==0x2. If a flag is set,
- it indicates the presence of the corresponding keyword in the SQL
- query.
-
- <li> In the old format, we know that each string has length 0 or
- 1. Therefore, only the first byte of each string is stored. The
- order of the strings is the same as in the new format. These five
- bytes are followed by the same 1 byte bitfield as in the new
- format. Finally, a 1 byte bitfield called empty_flags is stored.
- The low 5 bits of empty_flags indicate which of the five strings
- have length 0. For each of the following flags that is set, the
- corresponding string has length 0; for the flags that are not set,
- the string has length 1: FIELD_TERM_EMPTY==0x1,
- ENCLOSED_EMPTY==0x2, LINE_TERM_EMPTY==0x4, LINE_START_EMPTY==0x8,
- ESCAPED_EMPTY==0x10.
- </ul>
-
- Thus, the size of the new format is 6 bytes + the sum of the sizes
- of the five strings. The size of the old format is always 7
- bytes.
- </td>
- </tr>
-
- <tr>
- <td>field_lens</td>
- <td>num_fields 1 byte unsigned integers</td>
- <td>An array of num_fields integers representing the length of
- each field in the query. (num_fields is from the Post-Header).
- </td>
- </tr>
-
- <tr>
- <td>fields</td>
- <td>num_fields null-terminated strings</td>
- <td>An array of num_fields null-terminated strings, each
- representing a field in the query. (The trailing zero is
- redundant, since the length are stored in the num_fields array.)
- The total length of all strings equals to the sum of all
- field_lens, plus num_fields bytes for all the trailing zeros.
- </td>
- </tr>
-
- <tr>
- <td>table_name</td>
- <td>null-terminated string of length table_len+1 bytes</td>
- <td>The 'table_name' from the query, as a null-terminated string.
- (The trailing zero is actually redundant since the table_len is
- known from Post-Header.)
- </td>
- </tr>
-
- <tr>
- <td>db</td>
- <td>null-terminated string of length db_len+1 bytes</td>
- <td>The 'db' from the query, as a null-terminated string.
- (The trailing zero is actually redundant since the db_len is known
- from Post-Header.)
- </td>
- </tr>
-
- <tr>
- <td>file_name</td>
- <td>variable length string without trailing zero, extending to the
- end of the event (determined by the length field of the
- Common-Header)
- </td>
- <td>The 'file_name' from the query.
- </td>
- </tr>
-
- </table>
-
- @subsection Load_log_event_notes_on_previous_versions Notes on Previous Versions
-
- This event type is understood by current versions, but only
- generated by MySQL 3.23 and earlier.
-*/
-class Load_log_event: public Log_event
-{
-private:
-protected:
- int copy_log_event(const uchar *buf, ulong event_len,
- int body_offset,
- const Format_description_log_event* description_event);
-
-public:
- bool print_query(THD *thd, bool need_db, const char *cs, String *buf,
- my_off_t *fn_start, my_off_t *fn_end,
- const char *qualify_db);
- my_thread_id thread_id;
- ulong slave_proxy_id;
- uint32 table_name_len;
- /*
- No need to have a catalog, as these events can only come from 4.x.
- TODO: this may become false if Dmitri pushes his new LOAD DATA INFILE in
- 5.0 only (not in 4.x).
- */
- uint32 db_len;
- uint32 fname_len;
- uint32 num_fields;
- const char* fields;
- const uchar* field_lens;
- uint32 field_block_len;
-
- const char* table_name;
- const char* db;
- const char* fname;
- uint32 skip_lines;
- sql_ex_info sql_ex;
- bool local_fname;
- /**
- Indicates that this event corresponds to LOAD DATA CONCURRENT,
-
- @note Since Load_log_event event coming from the binary log
- lacks information whether LOAD DATA on master was concurrent
- or not, this flag is only set to TRUE for an auxiliary
- Load_log_event object which is used in mysql_load() to
- re-construct LOAD DATA statement from function parameters,
- for logging.
- */
- bool is_concurrent;
-
- /* fname doesn't point to memory inside Log_event::temp_buf */
- void set_fname_outside_temp_buf(const char *afname, size_t alen)
- {
- fname= afname;
- fname_len= (uint)alen;
- local_fname= TRUE;
- }
- /* fname doesn't point to memory inside Log_event::temp_buf */
- int check_fname_outside_temp_buf()
- {
- return local_fname;
- }
-
-#ifdef MYSQL_SERVER
- String field_lens_buf;
- String fields_buf;
-
- Load_log_event(THD* thd, const sql_exchange* ex, const char* db_arg,
- const char* table_name_arg,
- List<Item>& fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup, bool ignore,
- bool using_trans);
- void set_fields(const char* db, List<Item> &fields_arg,
- Name_resolution_context *context);
- const char* get_db() { return db; }
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool commented);
-#endif
-
- /*
- Note that for all the events related to LOAD DATA (Load_log_event,
- Create_file/Append/Exec/Delete, we pass description_event; however as
- logging of LOAD DATA is going to be changed in 4.1 or 5.0, this is only used
- for the common_header_len (post_header_len will not be changed).
- */
- Load_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event* description_event);
- ~Load_log_event() = default;
- Log_event_type get_type_code()
- {
- return sql_ex.new_format() ? NEW_LOAD_EVENT: LOAD_EVENT;
- }
-#ifdef MYSQL_SERVER
- bool write_data_header();
- bool write_data_body();
-#endif
- bool is_valid() const { return table_name != 0; }
- int get_data_size()
- {
- return (table_name_len + db_len + 2 + fname_len
- + LOAD_HEADER_LEN
- + sql_ex.data_size() + field_block_len + num_fields);
- }
-
-public: /* !!! Public in this patch to allow old usage */
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi)
- {
- return do_apply_event(thd->slave_net,rgi,0);
- }
-
- int do_apply_event(NET *net, rpl_group_info *rgi,
- bool use_rli_only_for_errors);
-#endif
-};
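
The binary-format table removed above is the only prose description of the two sql_ex encodings: the old format (type LOAD_EVENT) stores five single-byte separators, a flags byte, then empty_flags; the new format (NEW_LOAD_EVENT) stores the five separators length-prefixed, followed by the flags byte. The sketch below is decoded directly from that description, not from the deleted sql_ex_info code, and its struct and function names are illustrative.

#include <cstdint>
#include <cstddef>
#include <string>

// Decoded LOAD DATA separators, per the removed format description.
struct Sql_ex_decoded
{
  std::string field_term, enclosed, escaped, line_term, line_start;
  uint8_t opt_flags;   // REPLACE_FLAG 0x4, IGNORE_FLAG 0x8, OPT_ENCLOSED_FLAG 0x2
};

// Old format: always 7 bytes -- five single-byte strings in storage order,
// the flags byte, then empty_flags whose bits say which strings are empty
// (FIELD_TERM 0x1, ENCLOSED 0x2, LINE_TERM 0x4, LINE_START 0x8, ESCAPED 0x10).
static bool decode_old_sql_ex(const unsigned char *p, size_t len, Sql_ex_decoded *out)
{
  if (len < 7)
    return false;
  std::string *dst[5]= { &out->field_term, &out->enclosed, &out->escaped,
                         &out->line_term, &out->line_start };
  static const uint8_t empty_bit[5]= { 0x1, 0x2, 0x10, 0x4, 0x8 };
  for (int i= 0; i < 5; i++)
    if (!(p[6] & empty_bit[i]))
      dst[i]->assign(1, (char) p[i]);       // non-empty strings have length 1
  out->opt_flags= p[5];
  return true;
}

// New format: five length-prefixed strings followed by the flags byte,
// 6 bytes plus the string data in total; every read is bounds-checked.
static bool decode_new_sql_ex(const unsigned char *p, size_t len, Sql_ex_decoded *out)
{
  std::string *dst[5]= { &out->field_term, &out->enclosed, &out->escaped,
                         &out->line_term, &out->line_start };
  size_t pos= 0;
  for (int i= 0; i < 5; i++)
  {
    if (pos >= len)
      return false;
    uint8_t slen= p[pos++];
    if (pos + slen > len)
      return false;
    dst[i]->assign((const char *) p + pos, slen);
    pos+= slen;
  }
  if (pos >= len)
    return false;
  out->opt_flags= p[pos];
  return true;
}
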
-
-/**
- @class Start_log_event_v3
-
- Start_log_event_v3 is the Start_log_event of binlog format 3 (MySQL 3.23 and
- 4.x).
-
- Format_description_log_event derives from Start_log_event_v3; it is
- the Start_log_event of binlog format 4 (MySQL 5.0), that is, the
- event that describes the other events' Common-Header/Post-Header
- lengths. This event is sent by MySQL 5.0 whenever it starts sending
- a new binlog if the requested position is >4 (otherwise if ==4 the
- event will be sent naturally).
-
- @section Start_log_event_v3_binary_format Binary Format
-*/
-class Start_log_event_v3: public Log_event
-{
-public:
- /*
- If this event is at the start of the first binary log since server
- startup 'created' should be the timestamp when the event (and the
- binary log) was created. In the other case (i.e. this event is at
- the start of a binary log created by FLUSH LOGS or automatic
- rotation), 'created' should be 0. This "trick" is used by MySQL
- >=4.0.14 slaves to know whether they must drop stale temporary
- tables and whether they should abort unfinished transaction.
-
- Note that when 'created'!=0, it is always equal to the event's
- timestamp; indeed Start_log_event is written only in log.cc where
- the first constructor below is called, in which 'created' is set
- to 'when'. So in fact 'created' is a useless variable. When it is
- 0 we can read the actual value from timestamp ('when') and when it
- is non-zero we can read the same value from timestamp
- ('when'). Conclusion:
- - we use timestamp to print when the binlog was created.
- - we use 'created' only to know if this is a first binlog or not.
- In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in
- 3.23.57 does not print 'created the_date' if created was zero. This is now
- fixed.
- */
- time_t created;
- uint16 binlog_version;
- char server_version[ST_SERVER_VER_LEN];
- /*
- We set this to 1 if we don't want to have the created time in the log,
- which is the case when we rollover to a new log.
- */
- bool dont_set_created;
-
-#ifdef MYSQL_SERVER
- Start_log_event_v3();
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- Start_log_event_v3() = default;
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
-#endif
-
- Start_log_event_v3(const uchar *buf, uint event_len,
- const Format_description_log_event* description_event);
- ~Start_log_event_v3() = default;
- Log_event_type get_type_code() { return START_EVENT_V3;}
- my_off_t get_header_len(my_off_t l __attribute__((unused)))
- { return LOG_EVENT_MINIMAL_HEADER_LEN; }
-#ifdef MYSQL_SERVER
- bool write();
-#endif
- bool is_valid() const { return server_version[0] != 0; }
- int get_data_size()
- {
- return START_V3_HEADER_LEN; //no variable-sized part
- }
-
-protected:
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
- virtual enum_skip_reason do_shall_skip(rpl_group_info*)
- {
- /*
- Events from ourself should be skipped, but they should not
- decrease the slave skip counter.
- */
- if (this->server_id == global_system_variables.server_id)
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::EVENT_SKIP_NOT;
- }
-#endif
-};
-
-/**
@class Start_encryption_log_event
Start_encryption_log_event marks the beginning of encrypted data (all events
@@ -2846,10 +2414,41 @@ public:
@section Format_description_log_event_binary_format Binary Format
*/
-class Format_description_log_event: public Start_log_event_v3
+class Format_description_log_event: public Log_event
{
public:
/*
+ If this event is at the start of the first binary log since server
+ startup 'created' should be the timestamp when the event (and the
+ binary log) was created. In the other case (i.e. this event is at
+ the start of a binary log created by FLUSH LOGS or automatic
+ rotation), 'created' should be 0. This "trick" is used by MySQL
+ >=4.0.14 slaves to know whether they must drop stale temporary
+ tables and whether they should abort unfinished transaction.
+
+ Note that when 'created'!=0, it is always equal to the event's
+ timestamp; indeed Start_log_event is written only in log.cc where
+ the first constructor below is called, in which 'created' is set
+ to 'when'. So in fact 'created' is a useless variable. When it is
+ 0 we can read the actual value from timestamp ('when') and when it
+ is non-zero we can read the same value from timestamp
+ ('when'). Conclusion:
+ - we use timestamp to print when the binlog was created.
+ - we use 'created' only to know if this is a first binlog or not.
+ In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in
+ 3.23.57 does not print 'created the_date' if created was zero. This is now
+ fixed.
+ */
+ time_t created;
+ uint16 binlog_version;
+ char server_version[ST_SERVER_VER_LEN];
+ /*
+ We set this to 1 if we don't want to have the created time in the log,
+ which is the case when we rollover to a new log.
+ */
+ bool dont_set_created;
+
+ /*
The size of the fixed header which _all_ events have
(for binlogs written by this version, this is equal to
LOG_EVENT_HEADER_LEN), except FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT
@@ -2857,8 +2456,8 @@ public:
*/
uint8 common_header_len;
uint8 number_of_event_types;
- /*
- The list of post-headers' lengths followed
+ /*
+ The list of post-headers' lengths followed
by the checksum alg description byte
*/
uint8 *post_header_len;
@@ -2887,14 +2486,18 @@ public:
my_free(post_header_len);
}
Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;}
+ my_off_t get_header_len(my_off_t) { return LOG_EVENT_MINIMAL_HEADER_LEN; }
#ifdef MYSQL_SERVER
bool write();
+#ifdef HAVE_REPLICATION
+ void pack_info(Protocol* protocol);
+#endif /* HAVE_REPLICATION */
+#else
+ bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
bool header_is_valid() const
{
- return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
- LOG_EVENT_MINIMAL_HEADER_LEN)) &&
- (post_header_len != NULL));
+ return common_header_len >= LOG_EVENT_MINIMAL_HEADER_LEN && post_header_len;
}
bool is_valid() const
@@ -3106,7 +2709,7 @@ public:
const Format_description_log_event *description_event):
Log_event(buf, description_event) {}
- ~Xid_apply_log_event() {}
+ ~Xid_apply_log_event() = default;
bool is_valid() const { return 1; }
private:
#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
@@ -3302,7 +2905,7 @@ public:
#endif
XA_prepare_log_event(const uchar *buf,
const Format_description_log_event *description_event);
- ~XA_prepare_log_event() {}
+ ~XA_prepare_log_event() = default;
Log_event_type get_type_code() { return XA_PREPARE_LOG_EVENT; }
bool is_valid() const { return m_xid.formatID != -1; }
int get_data_size()
@@ -3860,82 +3463,6 @@ public:
};
-/* the classes below are for the new LOAD DATA INFILE logging */
-
-/**
- @class Create_file_log_event
-
- @section Create_file_log_event_binary_format Binary Format
-*/
-
-class Create_file_log_event: public Load_log_event
-{
-protected:
- /*
- Pretend we are Load event, so we can write out just
- our Load part - used on the slave when writing event out to
- SQL_LOAD-*.info file
- */
- bool fake_base;
-public:
- uchar *block;
- const uchar *event_buf;
- uint block_len;
- uint file_id;
- bool inited_from_old;
-
-#ifdef MYSQL_SERVER
- Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg,
- const char* table_name_arg,
- List<Item>& fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup, bool ignore,
- uchar* block_arg, uint block_len_arg,
- bool using_trans);
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info,
- bool enable_local);
-#endif
-
- Create_file_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event* description_event);
- ~Create_file_log_event()
- {
- my_free((void*) event_buf);
- }
-
- Log_event_type get_type_code()
- {
- return fake_base ? Load_log_event::get_type_code() : CREATE_FILE_EVENT;
- }
- int get_data_size()
- {
- return (fake_base ? Load_log_event::get_data_size() :
- Load_log_event::get_data_size() +
- 4 + 1 + block_len);
- }
- bool is_valid() const { return inited_from_old || block != 0; }
-#ifdef MYSQL_SERVER
- bool write_data_header();
- bool write_data_body();
- /*
- Cut out Create_file extensions and
- write it as Load event - used on the slave
- */
- bool write_base();
-#endif
-
-private:
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
-#endif
-};
-
-
/**
@class Append_block_log_event
@@ -3955,9 +3482,7 @@ public:
used by Append_block_log_event::write()), so it can't be read in
the Append_block_log_event(const uchar *buf, int event_len)
constructor. In other words, 'db' is used only for filtering by
- binlog-*-db rules. Create_file_log_event is different: it's 'db'
- (which is inherited from Load_log_event) is written to the binlog
- and can be re-read.
+ binlog-*-db rules.
*/
const char* db;
@@ -4033,46 +3558,6 @@ private:
/**
- @class Execute_load_log_event
-
- @section Delete_file_log_event_binary_format Binary Format
-*/
-
-class Execute_load_log_event: public Log_event
-{
-public:
- uint file_id;
- const char* db; /* see comment in Append_block_log_event */
-
-#ifdef MYSQL_SERVER
- Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans);
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
-#endif
-
- Execute_load_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event
- *description_event);
- ~Execute_load_log_event() = default;
- Log_event_type get_type_code() { return EXEC_LOAD_EVENT;}
- int get_data_size() { return EXEC_LOAD_HEADER_LEN ;}
- bool is_valid() const { return file_id != 0; }
-#ifdef MYSQL_SERVER
- bool write();
- const char* get_db() { return db; }
-#endif
-
-private:
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
-#endif
-};
-
-
-/**
@class Begin_load_query_log_event
Event for the first block of file to be loaded, its only difference from
@@ -5357,8 +4842,6 @@ private:
*/
virtual int do_exec_row(rpl_group_info *rli) = 0;
#endif /* defined(MYSQL_SERVER) && defined(HAVE_REPLICATION) */
-
- friend class Old_rows_log_event;
};
/**
@@ -5607,9 +5090,6 @@ private:
#endif
};
-
-#include "log_event_old.h"
-
/**
@class Incident_log_event
diff --git a/sql/log_event_client.cc b/sql/log_event_client.cc
index 15d3ae8921b..acdbcadda16 100644
--- a/sql/log_event_client.cc
+++ b/sql/log_event_client.cc
@@ -2104,9 +2104,9 @@ err:
}
-bool Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
+bool Format_description_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
- DBUG_ENTER("Start_log_event_v3::print");
+ DBUG_ENTER("Format_description_log_event::print");
Write_on_release_cache cache(&print_event_info->head_cache, file,
Write_on_release_cache::FLUSH_F);
@@ -2188,122 +2188,6 @@ bool Start_encryption_log_event::print(FILE* file,
}
-bool Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
-{
- return print(file, print_event_info, 0);
-}
-
-
-bool Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info,
- bool commented)
-{
- Write_on_release_cache cache(&print_event_info->head_cache, file_arg);
- bool different_db= 1;
- DBUG_ENTER("Load_log_event::print");
-
- if (!print_event_info->short_form)
- {
- if (print_header(&cache, print_event_info, FALSE) ||
- my_b_printf(&cache, "\tQuery\tthread_id=%ld\texec_time=%ld\n",
- thread_id, exec_time))
- goto err;
- }
-
- if (db)
- {
- /*
- If the database is different from the one of the previous statement, we
- need to print the "use" command, and we update the last_db.
- But if commented, the "use" is going to be commented so we should not
- update the last_db.
- */
- if ((different_db= memcmp(print_event_info->db, db, db_len + 1)) &&
- !commented)
- memcpy(print_event_info->db, db, db_len + 1);
- }
-
- if (db && db[0] && different_db)
- if (my_b_printf(&cache, "%suse %`s%s\n",
- commented ? "# " : "",
- db, print_event_info->delimiter))
- goto err;
-
- if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
- if (my_b_printf(&cache,"%sSET @@session.pseudo_thread_id=%lu%s\n",
- commented ? "# " : "", (ulong)thread_id,
- print_event_info->delimiter))
- goto err;
- if (my_b_printf(&cache, "%sLOAD DATA ",
- commented ? "# " : ""))
- goto err;
- if (check_fname_outside_temp_buf())
- if (my_b_write_string(&cache, "LOCAL "))
- goto err;
- if (my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname))
- goto err;
-
- if (sql_ex.opt_flags & REPLACE_FLAG)
- {
- if (my_b_write_string(&cache, "REPLACE "))
- goto err;
- }
- else if (sql_ex.opt_flags & IGNORE_FLAG)
- if (my_b_write_string(&cache, "IGNORE "))
- goto err;
-
- if (my_b_printf(&cache, "INTO TABLE `%s`", table_name) ||
- my_b_write_string(&cache, " FIELDS TERMINATED BY ") ||
- pretty_print_str(&cache, sql_ex.field_term, sql_ex.field_term_len))
- goto err;
-
- if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- if (my_b_write_string(&cache, " OPTIONALLY "))
- goto err;
- if (my_b_write_string(&cache, " ENCLOSED BY ") ||
- pretty_print_str(&cache, sql_ex.enclosed, sql_ex.enclosed_len) ||
- my_b_write_string(&cache, " ESCAPED BY ") ||
- pretty_print_str(&cache, sql_ex.escaped, sql_ex.escaped_len) ||
- my_b_write_string(&cache, " LINES TERMINATED BY ") ||
- pretty_print_str(&cache, sql_ex.line_term, sql_ex.line_term_len))
- goto err;
-
- if (sql_ex.line_start)
- {
- if (my_b_write_string(&cache," STARTING BY ") ||
- pretty_print_str(&cache, sql_ex.line_start, sql_ex.line_start_len))
- goto err;
- }
- if ((long) skip_lines > 0)
- if (my_b_printf(&cache, " IGNORE %ld LINES", (long) skip_lines))
- goto err;
-
- if (num_fields)
- {
- uint i;
- const char* field = fields;
- if (my_b_write_string(&cache, " ("))
- goto err;
- for (i = 0; i < num_fields; i++)
- {
- if (i)
- if (my_b_write_byte(&cache, ','))
- goto err;
- if (my_b_printf(&cache, "%`s", field))
- goto err;
- field += field_lens[i] + 1;
- }
- if (my_b_write_byte(&cache, ')'))
- goto err;
- }
-
- if (my_b_printf(&cache, "%s\n", print_event_info->delimiter))
- goto err;
- DBUG_RETURN(cache.flush_data());
-err:
- DBUG_RETURN(1);
-}
-
-
bool Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
if (print_event_info->short_form)
@@ -2626,61 +2510,6 @@ bool Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
#endif
-bool Create_file_log_event::print(FILE* file,
- PRINT_EVENT_INFO* print_event_info,
- bool enable_local)
-{
- if (print_event_info->short_form)
- {
- if (enable_local && check_fname_outside_temp_buf())
- return Load_log_event::print(file, print_event_info);
- return 0;
- }
-
- Write_on_release_cache cache(&print_event_info->head_cache, file);
-
- if (enable_local)
- {
- if (Load_log_event::print(file, print_event_info,
- !check_fname_outside_temp_buf()))
- goto err;
-
- /**
- reduce the size of io cache so that the write function is called
- for every call to my_b_printf().
- */
- DBUG_EXECUTE_IF ("simulate_create_event_write_error",
- {(&cache)->write_pos= (&cache)->write_end;
- DBUG_SET("+d,simulate_file_write_error");});
- /*
- That one is for "file_id: etc" below: in mysqlbinlog we want the #, in
- SHOW BINLOG EVENTS we don't.
- */
- if (my_b_write_byte(&cache, '#'))
- goto err;
- }
-
- if (my_b_printf(&cache, " file_id: %d block_len: %d\n", file_id, block_len))
- goto err;
-
- return cache.flush_data();
-err:
- return 1;
-
-}
-
-
-bool Create_file_log_event::print(FILE* file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return print(file, print_event_info, 0);
-}
-
-
-/*
- Append_block_log_event::print()
-*/
-
bool Append_block_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
@@ -2700,10 +2529,6 @@ err:
}
-/*
- Delete_file_log_event::print()
-*/
-
bool Delete_file_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
@@ -2719,25 +2544,6 @@ bool Delete_file_log_event::print(FILE* file,
return cache.flush_data();
}
-/*
- Execute_load_log_event::print()
-*/
-
-bool Execute_load_log_event::print(FILE* file,
- PRINT_EVENT_INFO* print_event_info)
-{
- if (print_event_info->short_form)
- return 0;
-
- Write_on_release_cache cache(&print_event_info->head_cache, file);
-
- if (print_header(&cache, print_event_info, FALSE) ||
- my_b_printf(&cache, "\n#Exec_load: file_id=%d\n",
- file_id))
- return 1;
-
- return cache.flush_data();
-}
bool Execute_load_query_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
@@ -2995,10 +2801,6 @@ err:
where fragments are represented by a pair of indexed user
"one shot" variables.
- @note
- If any changes made don't forget to duplicate them to
- Old_rows_log_event as long as it's supported.
-
@param file pointer to IO_CACHE
@param print_event_info pointer to print_event_info specializing
what out of and how to print the event
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
deleted file mode 100644
index 1990103598e..00000000000
--- a/sql/log_event_old.cc
+++ /dev/null
@@ -1,2749 +0,0 @@
-/* Copyright (c) 2007, 2019, Oracle and/or its affiliates.
- Copyright (c) 2009, 2019, MariaDB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#include "mariadb.h"
-#include "sql_priv.h"
-#ifndef MYSQL_CLIENT
-#include "unireg.h"
-#endif
-#include "log_event.h"
-#ifndef MYSQL_CLIENT
-#include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE
-#include "sql_base.h" // close_tables_for_reopen
-#include "key.h" // key_copy
-#include "lock.h" // mysql_unlock_tables
-#include "rpl_rli.h"
-#include "rpl_utility.h"
-#endif
-#include "log_event_old.h"
-#include "rpl_record_old.h"
-#include "transaction.h"
-
-PSI_memory_key key_memory_log_event_old;
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-// Old implementation of do_apply_event()
-int
-Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
-{
- DBUG_ENTER("Old_rows_log_event::do_apply_event(st_relay_log_info*)");
- int error= 0;
- THD *ev_thd= ev->thd;
- uchar const *row_start= ev->m_rows_buf;
- const Relay_log_info *rli= rgi->rli;
-
- /*
- If m_table_id == ~0UL, then we have a dummy event that does not
- contain any data. In that case, we just remove all tables in the
- tables_to_lock list, close the thread tables, and return with
- success.
- */
- if (ev->m_table_id == ~0UL)
- {
- /*
- This one is supposed to be set: just an extra check so that
- nothing strange has happened.
- */
- DBUG_ASSERT(ev->get_flags(Old_rows_log_event::STMT_END_F));
-
- rgi->slave_close_thread_tables(ev_thd);
- ev_thd->clear_error();
- DBUG_RETURN(0);
- }
-
- /*
- 'ev_thd' has been set by exec_relay_log_event(), just before calling
- do_apply_event(). We still check here to prevent future coding
- errors.
- */
- DBUG_ASSERT(rgi->thd == ev_thd);
-
- /*
- If there is no locks taken, this is the first binrow event seen
- after the table map events. We should then lock all the tables
- used in the transaction and proceed with execution of the actual
- event.
- */
- if (!ev_thd->lock)
- {
- /*
- Lock_tables() reads the contents of ev_thd->lex, so they must be
- initialized.
-
- We also call the THD::reset_for_next_command(), since this
- is the logical start of the next "statement". Note that this
- call might reset the value of current_stmt_binlog_format, so
- we need to do any changes to that value after this function.
- */
- delete_explain_query(thd->lex);
- lex_start(ev_thd);
- ev_thd->reset_for_next_command();
-
- /*
- This is a row injection, so we flag the "statement" as
- such. Note that this code is called both when the slave does row
- injections and when the BINLOG statement is used to do row
- injections.
- */
- ev_thd->lex->set_stmt_row_injection();
-
- if (unlikely(open_and_lock_tables(ev_thd, rgi->tables_to_lock, FALSE, 0)))
- {
- if (ev_thd->is_error())
- {
- /*
- Error reporting borrowed from Query_log_event with many excessive
- simplifications.
- We should not honour --slave-skip-errors at this point as we are
- having severe errors which should not be skipped.
- */
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
- "Error '%s' on opening tables",
- ev_thd->get_stmt_da()->message());
- ev_thd->is_slave_error= 1;
- }
- DBUG_RETURN(1);
- }
-
- /*
- When the open and locking succeeded, we check all tables to
- ensure that they still have the correct type.
- */
-
- {
- TABLE_LIST *table_list_ptr= rgi->tables_to_lock;
- for (uint i=0 ; table_list_ptr&& (i< rgi->tables_to_lock_count);
- table_list_ptr= table_list_ptr->next_global, i++)
- {
- /*
- Please see comment in log_event.cc-Rows_log_event::do_apply_event()
- function for the explanation of the below if condition
- */
- if (table_list_ptr->parent_l)
- continue;
- /*
- We can use a down cast here since we know that every table added
- to the tables_to_lock is a RPL_TABLE_LIST(or child table which is
- skipped above).
- */
- RPL_TABLE_LIST *ptr=static_cast<RPL_TABLE_LIST*>(table_list_ptr);
- DBUG_ASSERT(ptr->m_tabledef_valid);
- TABLE *conv_table;
- if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
- {
- ev_thd->is_slave_error= 1;
- rgi->slave_close_thread_tables(ev_thd);
- DBUG_RETURN(Old_rows_log_event::ERR_BAD_TABLE_DEF);
- }
- DBUG_PRINT("debug", ("Table: %s.%s is compatible with master"
- " - conv_table: %p",
- ptr->table->s->db.str,
- ptr->table->s->table_name.str, conv_table));
- ptr->m_conv_table= conv_table;
- }
- }
-
- /*
- ... and then we add all the tables to the table map and remove
- them from tables to lock.
-
- We also invalidate the query cache for all the tables, since
- they will now be changed.
-
- TODO [/Matz]: Maybe the query cache should not be invalidated
- here? It might be that a table is not changed, even though it
- was locked for the statement. We do know that each
- Old_rows_log_event contain at least one row, so after processing one
- Old_rows_log_event, we can invalidate the query cache for the
- associated table.
- */
- TABLE_LIST *ptr= rgi->tables_to_lock;
- for (uint i=0; ptr && (i < rgi->tables_to_lock_count); ptr= ptr->next_global, i++)
- {
- /*
- Please see comment in log_event.cc-Rows_log_event::do_apply_event()
- function for the explanation of the below if condition
- */
- if (ptr->parent_l)
- continue;
- rgi->m_table_map.set_table(ptr->table_id, ptr->table);
- }
-#ifdef HAVE_QUERY_CACHE
- query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
-#endif
- }
-
- TABLE* table= rgi->m_table_map.get_table(ev->m_table_id);
-
- if (table)
- {
- /*
- table == NULL means that this table should not be replicated
- (this was set up by Table_map_log_event::do_apply_event()
- which tested replicate-* rules).
- */
-
- /*
- It's not needed to set_time() but
- 1) it continues the property that "Time" in SHOW PROCESSLIST shows how
- much slave is behind
- 2) it will be needed when we allow replication from a table with no
- TIMESTAMP column to a table with one.
- So we call set_time(), like in SBR. Presently it changes nothing.
- */
- ev_thd->set_time(ev->when, ev->when_sec_part);
- /*
- There are a few flags that are replicated with each row event.
- Make sure to set/clear them before executing the main body of
- the event.
- */
- if (ev->get_flags(Old_rows_log_event::NO_FOREIGN_KEY_CHECKS_F))
- ev_thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
- else
- ev_thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
-
- if (ev->get_flags(Old_rows_log_event::RELAXED_UNIQUE_CHECKS_F))
- ev_thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
- else
- ev_thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
- /* A small test to verify that objects have consistent types */
- DBUG_ASSERT(sizeof(ev_thd->variables.option_bits) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
-
- table->rpl_write_set= table->write_set;
-
- error= do_before_row_operations(table);
- while (error == 0 && row_start < ev->m_rows_end)
- {
- uchar const *row_end= NULL;
- if (unlikely((error= do_prepare_row(ev_thd, rgi, table, row_start,
- &row_end))))
- break; // We should perform the after-row operation even in
- // the case of error
-
- DBUG_ASSERT(row_end != NULL); // cannot happen
- DBUG_ASSERT(row_end <= ev->m_rows_end);
-
- /* in_use can have been set to NULL in close_tables_for_reopen */
- THD* old_thd= table->in_use;
- if (!table->in_use)
- table->in_use= ev_thd;
- error= do_exec_row(table);
- table->in_use = old_thd;
- switch (error)
- {
- /* Some recoverable errors */
- case HA_ERR_RECORD_CHANGED:
- case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
- tuple does not exist */
- error= 0;
- case 0:
- break;
-
- default:
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
- "Error in %s event: row application failed. %s",
- ev->get_type_str(),
- ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
- thd->is_slave_error= 1;
- break;
- }
-
- row_start= row_end;
- }
- DBUG_EXECUTE_IF("stop_slave_middle_group",
- const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
- error= do_after_row_operations(table, error);
- }
-
- if (unlikely(error))
- { /* error has occurred during the transaction */
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
- "Error in %s event: error during transaction execution "
- "on table %s.%s. %s",
- ev->get_type_str(), table->s->db.str,
- table->s->table_name.str,
- ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
-
- /*
- If one day we honour --skip-slave-errors in row-based replication, and
- the error should be skipped, then we would clear mappings, rollback,
- close tables, but the slave SQL thread would not stop and then may
- assume the mapping is still available, the tables are still open...
- So then we should clear mappings/rollback/close here only if this is a
- STMT_END_F.
- For now we code, knowing that error is not skippable and so slave SQL
- thread is certainly going to stop.
- rollback at the caller along with sbr.
- */
- ev_thd->reset_current_stmt_binlog_format_row();
- rgi->cleanup_context(ev_thd, error);
- ev_thd->is_slave_error= 1;
- DBUG_RETURN(error);
- }
-
- DBUG_RETURN(0);
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-/*
-  Check if there are any more UNIQUE keys after the given key.
-
-  Returns 1 if the given key is the last UNIQUE key of the table,
-  0 otherwise.
-*/
-static int
-last_uniq_key(TABLE *table, uint keyno)
-{
- while (++keyno < table->s->keys)
- if (table->key_info[keyno].flags & HA_NOSAME)
- return 0;
- return 1;
-}
-
-
-/*
- Compares table->record[0] and table->record[1]
-
- Returns TRUE if different.
-*/
-static bool record_compare(TABLE *table)
-{
- bool result= FALSE;
- if (table->s->blob_fields + table->s->varchar_fields == 0)
- {
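-    /* No blob or varchar fields: compare record[0] and record[1] byte-wise */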
- result= cmp_record(table,record[1]);
- goto record_compare_exit;
- }
-
- /* Compare null bits */
- if (memcmp(table->null_flags,
- table->null_flags+table->s->rec_buff_length,
- table->s->null_bytes))
- {
- result= TRUE; // Diff in NULL value
- goto record_compare_exit;
- }
-
- /* Compare updated fields */
- for (Field **ptr=table->field ; *ptr ; ptr++)
- {
- if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length))
- {
- result= TRUE;
- goto record_compare_exit;
- }
- }
-
-record_compare_exit:
- return result;
-}
-
-
-/*
- Copy "extra" columns from record[1] to record[0].
-
- Copy the extra fields that are not present on the master but are
- present on the slave from record[1] to record[0]. This is used
- after fetching a record that are to be updated, either inside
-  after fetching a record that is to be updated, either inside
- */
-static int
-copy_extra_record_fields(TABLE *table,
- size_t master_reclength,
- my_ptrdiff_t master_fields)
-{
- DBUG_ENTER("copy_extra_record_fields(table, master_reclen, master_fields)");
- DBUG_PRINT("info", ("Copying to %p "
- "from field %lu at offset %lu "
- "to field %d at offset %lu",
- table->record[0],
- (ulong) master_fields, (ulong) master_reclength,
- table->s->fields, table->s->reclength));
- /*
-    Copy the extra fields of the slave that do not exist on the
-    master into record[0] (they currently contain the default values).
- */
-
- if (table->s->fields < (uint) master_fields)
- DBUG_RETURN(0);
-
- DBUG_ASSERT(master_reclength <= table->s->reclength);
- if (master_reclength < table->s->reclength)
- memcpy(table->record[0] + master_reclength,
- table->record[1] + master_reclength,
- table->s->reclength - master_reclength);
-
- /*
- Bit columns are special. We iterate over all the remaining
- columns and copy the "extra" bits to the new record. This is
- not a very good solution: it should be refactored on
- opportunity.
-
- REFACTORING SUGGESTION (Matz). Introduce a member function
- similar to move_field_offset() called copy_field_offset() to
- copy field values and implement it for all Field subclasses. Use
- this function to copy data from the found record to the record
-    that is going to be inserted.
-
-    The copy_field_offset() function needs to be a virtual function,
- which in this case will prevent copying an entire range of
- fields efficiently.
- */
- {
- Field **field_ptr= table->field + master_fields;
- for ( ; *field_ptr ; ++field_ptr)
- {
- /*
- Set the null bit according to the values in record[1]
- */
- if ((*field_ptr)->maybe_null() &&
- (*field_ptr)->is_null_in_record(reinterpret_cast<uchar*>(table->record[1])))
- (*field_ptr)->set_null();
- else
- (*field_ptr)->set_notnull();
-
- /*
- Do the extra work for special columns.
- */
- switch ((*field_ptr)->real_type())
- {
- default:
- /* Nothing to do */
- break;
-
- case MYSQL_TYPE_BIT:
- Field_bit *f= static_cast<Field_bit*>(*field_ptr);
- if (f->bit_len > 0)
- {
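-          /* bit_ptr points into record[0]; adding offset addresses record[1] */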
- my_ptrdiff_t const offset= table->record[1] - table->record[0];
- uchar const bits=
- get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len);
- set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len);
- }
- break;
- }
- }
- }
- DBUG_RETURN(0); // All OK
-}
-
-
-/*
- Replace the provided record in the database.
-
- SYNOPSIS
- replace_record()
- thd Thread context for writing the record.
- table Table to which record should be written.
- master_reclength
- Offset to first column that is not present on the master,
- alternatively the length of the record on the master
- side.
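-      master_fields
-             Number of columns in the row as sent by the master.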
-
- RETURN VALUE
- Error code on failure, 0 on success.
-
- DESCRIPTION
- Similar to how it is done in mysql_insert(), we first try to do
-    a ha_write_row() and if that fails due to a duplicated key (or
-    unique index), we do an ha_update_row() or a ha_delete_row() instead.
- */
-static int
-replace_record(THD *thd, TABLE *table,
- ulong const master_reclength,
- uint const master_fields)
-{
- DBUG_ENTER("replace_record");
- DBUG_ASSERT(table != NULL && thd != NULL);
-
- int error;
- int keynum;
- auto_afree_ptr<char> key(NULL);
-
-#ifndef DBUG_OFF
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);
- DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set);
-#endif
-
- while (unlikely(error= table->file->ha_write_row(table->record[0])))
- {
- if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
- {
- table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
- DBUG_RETURN(error);
- }
- if (unlikely((keynum= table->file->get_dup_key(error)) < 0))
- {
- table->file->print_error(error, MYF(0));
- /*
- We failed to retrieve the duplicate key
- - either because the error was not "duplicate key" error
-        - or because the information about which key was duplicated is not available
- */
- DBUG_RETURN(error);
- }
-
- /*
- We need to retrieve the old row into record[1] to be able to
- either update or delete the offending record. We either:
-
-      - use rnd_pos() with a row-id (available as dup_ref) to position at
-        the offending row, if that is possible (MyISAM and Blackhole), or else
-
- - use index_read_idx() with the key that is duplicated, to
- retrieve the offending row.
- */
- if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
- {
- error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("rnd_pos() returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
- else
- {
- if (unlikely(table->file->extra(HA_EXTRA_FLUSH_CACHE)))
- {
- DBUG_RETURN(my_errno);
- }
-
- if (key.get() == NULL)
- {
- key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
- if (unlikely(key.get() == NULL))
- DBUG_RETURN(ENOMEM);
- }
-
- key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
- 0);
- error= table->file->ha_index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
- if (unlikely(error))
- {
- DBUG_PRINT("info", ("index_read_idx() returns error %d", error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
-
- /*
- Now, table->record[1] should contain the offending row. That
- will enable us to update it or, alternatively, delete it (so
- that we can insert the new row afterwards).
-
- First we copy the columns into table->record[0] that are not
- present on the master from table->record[1], if there are any.
- */
- copy_extra_record_fields(table, master_reclength, master_fields);
-
- /*
- REPLACE is defined as either INSERT or DELETE + INSERT. If
- possible, we can replace it with an UPDATE, but that will not
- work on InnoDB if FOREIGN KEY checks are necessary.
-
- I (Matz) am not sure of the reason for the last_uniq_key()
-    check, but I'm guessing that it's something along the
- following lines.
-
- Suppose that we got the duplicate key to be a key that is not
- the last unique key for the table and we perform an update:
- then there might be another key for which the unique check will
- fail, so we're better off just deleting the row and inserting
- the correct row.
- */
- if (last_uniq_key(table, keynum) &&
- !table->file->referenced_by_foreign_key())
- {
- error=table->file->ha_update_row(table->record[1],
- table->record[0]);
- if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME)
- table->file->print_error(error, MYF(0));
- else
- error= 0;
- DBUG_RETURN(error);
- }
- else
- {
- if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
- {
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- /* Will retry ha_write_row() with the offending row removed. */
- }
- }
-
- DBUG_RETURN(error);
-}
-
-
-/**
- Find the row given by 'key', if the table has keys, or else use a table scan
- to find (and fetch) the row.
-
- If the engine allows random access of the records, a combination of
- position() and rnd_pos() will be used.
-
- @param table Pointer to table to search
- @param key Pointer to key to use for search, if table has key
-
- @pre <code>table->record[0]</code> shall contain the row to locate
- and <code>key</code> shall contain a key to use for searching, if
- the engine has a key.
-
- @post If the return value is zero, <code>table->record[1]</code>
- will contain the fetched row and the internal "cursor" will refer to
- the row. If the return value is non-zero,
- <code>table->record[1]</code> is undefined. In either case,
- <code>table->record[0]</code> is undefined.
-
- @return Zero if the row was successfully fetched into
- <code>table->record[1]</code>, error code otherwise.
- */
-
-static int find_and_fetch_row(TABLE *table, uchar *key)
-{
- DBUG_ENTER("find_and_fetch_row(TABLE *table, uchar *key, uchar *record)");
- DBUG_PRINT("enter", ("table: %p, key: %p record: %p",
- table, key, table->record[1]));
-
- DBUG_ASSERT(table->in_use != NULL);
-
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
-
- if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- table->s->primary_key < MAX_KEY)
- {
- /*
- Use a more efficient method to fetch the record given by
- table->record[0] if the engine allows it. We first compute a
- row reference using the position() member function (it will be
-      stored in table->file->ref) and then use rnd_pos() to position
- the "cursor" (i.e., record[0] in this case) at the correct row.
-
- TODO: Add a check that the correct record has been fetched by
- comparing with the original record. Take into account that the
- record on the master and slave can be of different
- length. Something along these lines should work:
-
- ADD>>> store_record(table,record[1]);
- int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
- ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
- table->s->reclength) == 0);
-
- */
- table->file->position(table->record[0]);
- int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
- /*
- rnd_pos() returns the record in table->record[0], so we have to
- move it to table->record[1].
- */
- memcpy(table->record[1], table->record[0], table->s->reclength);
- DBUG_RETURN(error);
- }
-
- /* We need to retrieve all fields */
- /* TODO: Move this out from this function to main loop */
- table->use_all_columns();
-
- if (table->s->keys > 0)
- {
- int error;
- /* We have a key: search the table using the index */
- if (!table->file->inited &&
- unlikely(error= table->file->ha_index_init(0, FALSE)))
- {
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength);
-#endif
-
- /*
-      We need to set the null bytes to ensure that the filler bits are
- all set when returning. There are storage engines that just set
- the necessary bits on the bytes and don't set the filler bits
- correctly.
- */
- my_ptrdiff_t const pos=
- table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
- table->record[1][pos]= 0xFF;
- if (unlikely((error= table->file->ha_index_read_map(table->record[1], key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))))
- {
- table->file->print_error(error, MYF(0));
- table->file->ha_index_end();
- DBUG_RETURN(error);
- }
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength);
-#endif
- /*
- Below is a minor "optimization". If the key (i.e., key number
- 0) has the HA_NOSAME flag set, we know that we have found the
- correct record (since there can be no duplicates); otherwise, we
- have to compare the record with the one found to see if it is
- the correct one.
-
- CAVEAT! This behaviour is essential for the replication of,
- e.g., the mysql.proc table since the correct record *shall* be
- found using the primary key *only*. There shall be no
- comparison of non-PK columns to decide if the correct record is
- found. I can see no scenario where it would be incorrect to
-      choose the row to change using only a PK or a unique non-null index (UNNI).
- */
- if (table->key_info->flags & HA_NOSAME)
- {
- table->file->ha_index_end();
- DBUG_RETURN(0);
- }
-
- while (record_compare(table))
- {
- int error;
-
- while ((error= table->file->ha_index_next(table->record[1])))
- {
- table->file->print_error(error, MYF(0));
- table->file->ha_index_end();
- DBUG_RETURN(error);
- }
- }
-
- /*
- Have to restart the scan to be able to fetch the next row.
- */
- table->file->ha_index_end();
- }
- else
- {
- int restart_count= 0; // Number of times scanning has restarted from top
- int error;
-
- /* We don't have a key: search the table using rnd_next() */
- if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
- return error;
-
- /* Continue until we find the right record or have made a full loop */
- do
- {
- error= table->file->ha_rnd_next(table->record[1]);
-
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("record[1]", table->record[1], table->s->reclength);
-
- switch (error) {
- case 0:
- break;
-
- case HA_ERR_END_OF_FILE:
- if (++restart_count < 2)
- {
- int error2;
- if (unlikely((error2= table->file->ha_rnd_init_with_error(1))))
- DBUG_RETURN(error2);
- }
- break;
-
- default:
- table->file->print_error(error, MYF(0));
- DBUG_PRINT("info", ("Record not found"));
- (void) table->file->ha_rnd_end();
- DBUG_RETURN(error);
- }
- }
- while (restart_count < 2 && record_compare(table));
-
- /*
- Have to restart the scan to be able to fetch the next row.
- */
- DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ? "not " : ""));
- table->file->ha_rnd_end();
-
- DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
- DBUG_RETURN(error);
- }
-
- DBUG_RETURN(0);
-}
-
-
-/**********************************************************
- Row handling primitives for Write_rows_log_event_old
- **********************************************************/
-
-int Write_rows_log_event_old::do_before_row_operations(TABLE *table)
-{
- int error= 0;
-
- /*
- We are using REPLACE semantics and not INSERT IGNORE semantics
- when writing rows, that is: new rows replace old rows. We need to
- inform the storage engine that it should use this behaviour.
- */
-
- /* Tell the storage engine that we are using REPLACE semantics. */
- thd->lex->duplicates= DUP_REPLACE;
-
- thd->lex->sql_command= SQLCOM_REPLACE;
- /*
-    Do not raise the error flag in case of hitting a unique constraint
- */
- table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
- table->file->ha_start_bulk_insert(0);
- return error;
-}
-
-
-int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
-{
- int local_error= 0;
- table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
- table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /*
-    Resetting the extra flag with
-    table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY)
-    triggers bug#27077.
-    TODO: explain or fix.
- */
- if (unlikely((local_error= table->file->ha_end_bulk_insert())))
- {
- table->file->print_error(local_error, MYF(0));
- }
- return error? error : local_error;
-}
-
-
-int
-Write_rows_log_event_old::do_prepare_row(THD *thd_arg,
- rpl_group_info *rgi,
- TABLE *table,
- uchar const *row_start,
- uchar const **row_end)
-{
- DBUG_ASSERT(table != NULL);
- DBUG_ASSERT(row_start && row_end);
-
- int error;
- error= unpack_row_old(rgi,
- table, m_width, table->record[0],
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->write_set, PRE_GA_WRITE_ROWS_EVENT);
- bitmap_copy(table->read_set, table->write_set);
- return error;
-}
-
-
-int Write_rows_log_event_old::do_exec_row(TABLE *table)
-{
- DBUG_ASSERT(table != NULL);
- int error= replace_record(thd, table, m_master_reclength, m_width);
- return error;
-}
-
-
-/**********************************************************
- Row handling primitives for Delete_rows_log_event_old
- **********************************************************/
-
-int Delete_rows_log_event_old::do_before_row_operations(TABLE *table)
-{
- DBUG_ASSERT(m_memory == NULL);
-
- if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- table->s->primary_key < MAX_KEY)
- {
- /*
- We don't need to allocate any memory for m_after_image and
- m_key since they are not used.
- */
- return 0;
- }
-
- int error= 0;
-
- if (table->s->keys > 0)
- {
- m_memory= (uchar*) my_multi_malloc(key_memory_log_event_old, MYF(MY_WME),
- &m_after_image,
- (uint) table->s->reclength,
- &m_key,
- (uint) table->key_info->key_length,
- NullS);
- }
- else
- {
- m_after_image= (uchar*) my_malloc(key_memory_log_event_old, table->s->reclength, MYF(MY_WME));
- m_memory= (uchar*)m_after_image;
- m_key= NULL;
- }
- if (!m_memory)
- return HA_ERR_OUT_OF_MEM;
-
- return error;
-}
-
-
-int Delete_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
-{
-  /* error= TODO: find out what this should really be; it triggers close_scan in NDB, should we return the error? */
- table->file->ha_index_or_rnd_end();
- my_free(m_memory); // Free for multi_malloc
- m_memory= NULL;
- m_after_image= NULL;
- m_key= NULL;
-
- return error;
-}
-
-
-int
-Delete_rows_log_event_old::do_prepare_row(THD *thd_arg,
- rpl_group_info *rgi,
- TABLE *table,
- uchar const *row_start,
- uchar const **row_end)
-{
- int error;
- DBUG_ASSERT(row_start && row_end);
- /*
-    This assertion actually checks that there are at least as many
- columns on the slave as on the master.
- */
- DBUG_ASSERT(table->s->fields >= m_width);
-
- error= unpack_row_old(rgi,
- table, m_width, table->record[0],
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->read_set, PRE_GA_DELETE_ROWS_EVENT);
- /*
- If we will access rows using the random access method, m_key will
- be set to NULL, so we do not need to make a key copy in that case.
- */
- if (m_key)
- {
- KEY *const key_info= table->key_info;
-
- key_copy(m_key, table->record[0], key_info, 0);
- }
-
- return error;
-}
-
-
-int Delete_rows_log_event_old::do_exec_row(TABLE *table)
-{
- int error;
- DBUG_ASSERT(table != NULL);
-
- if (likely(!(error= ::find_and_fetch_row(table, m_key))))
- {
- /*
- Now we should have the right row to delete. We are using
- record[0] since it is guaranteed to point to a record with the
- correct value.
- */
- error= table->file->ha_delete_row(table->record[0]);
- }
- return error;
-}
-
-
-/**********************************************************
- Row handling primitives for Update_rows_log_event_old
- **********************************************************/
-
-int Update_rows_log_event_old::do_before_row_operations(TABLE *table)
-{
- DBUG_ASSERT(m_memory == NULL);
-
- int error= 0;
-
- if (table->s->keys > 0)
- {
- m_memory= (uchar*) my_multi_malloc(key_memory_log_event_old, MYF(MY_WME),
- &m_after_image,
- (uint) table->s->reclength,
- &m_key,
- (uint) table->key_info->key_length,
- NullS);
- }
- else
- {
- m_after_image= (uchar*) my_malloc(key_memory_log_event_old, table->s->reclength, MYF(MY_WME));
- m_memory= m_after_image;
- m_key= NULL;
- }
- if (!m_memory)
- return HA_ERR_OUT_OF_MEM;
-
- return error;
-}
-
-
-int Update_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
-{
-  /* error= TODO: find out what this should really be; it triggers close_scan in NDB, should we return the error? */
- table->file->ha_index_or_rnd_end();
- my_free(m_memory);
- m_memory= NULL;
- m_after_image= NULL;
- m_key= NULL;
-
- return error;
-}
-
-
-int Update_rows_log_event_old::do_prepare_row(THD *thd_arg,
- rpl_group_info *rgi,
- TABLE *table,
- uchar const *row_start,
- uchar const **row_end)
-{
- int error;
- DBUG_ASSERT(row_start && row_end);
- /*
-    This assertion actually checks that there are at least as many
- columns on the slave as on the master.
- */
- DBUG_ASSERT(table->s->fields >= m_width);
-
- /* record[0] is the before image for the update */
- error= unpack_row_old(rgi,
- table, m_width, table->record[0],
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->read_set, PRE_GA_UPDATE_ROWS_EVENT);
- row_start = *row_end;
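-  /* Note: any error from the before-image unpack above is overwritten below */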
- /* m_after_image is the after image for the update */
- error= unpack_row_old(rgi,
- table, m_width, m_after_image,
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->write_set, PRE_GA_UPDATE_ROWS_EVENT);
-
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("m_after_image", m_after_image, table->s->reclength);
-
- /*
- If we will access rows using the random access method, m_key will
- be set to NULL, so we do not need to make a key copy in that case.
- */
- if (m_key)
- {
- KEY *const key_info= table->key_info;
-
- key_copy(m_key, table->record[0], key_info, 0);
- }
-
- return error;
-}
-
-
-int Update_rows_log_event_old::do_exec_row(TABLE *table)
-{
- DBUG_ASSERT(table != NULL);
-
- int error= ::find_and_fetch_row(table, m_key);
- if (unlikely(error))
- return error;
-
- /*
- We have to ensure that the new record (i.e., the after image) is
- in record[0] and the old record (i.e., the before image) is in
-    record[1].  This is because some storage engines (for example,
-    the partition engine) require it.
-
- Since find_and_fetch_row() puts the fetched record (i.e., the old
- record) in record[1], we can keep it there. We put the new record
- (i.e., the after image) into record[0], and copy the fields that
- are on the slave (i.e., in record[1]) into record[0], effectively
-    overwriting the default values that were put there by the
- unpack_row() function.
- */
- memcpy(table->record[0], m_after_image, table->s->reclength);
- copy_extra_record_fields(table, m_master_reclength, m_width);
-
- /*
- Now we have the right row to update. The old row (the one we're
-    looking for) is in record[1] and the new row is in record[0].
- We also have copied the original values already in the slave's
- database into the after image delivered from the master.
- */
- error= table->file->ha_update_row(table->record[1], table->record[0]);
- if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
- error= 0;
-
- return error;
-}
-
-#endif
-
-
-/**************************************************************************
- Rows_log_event member functions
-**************************************************************************/
-
-#ifndef MYSQL_CLIENT
-Old_rows_log_event::Old_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Log_event(thd_arg, 0, is_transactional),
- m_row_count(0),
- m_table(tbl_arg),
- m_table_id(tid),
- m_width(tbl_arg ? tbl_arg->s->fields : 1),
- m_rows_buf(0), m_rows_cur(0), m_rows_end(0), m_flags(0)
-#ifdef HAVE_REPLICATION
- , m_curr_row(NULL), m_curr_row_end(NULL), m_key(NULL)
-#endif
-{
-
- // This constructor should not be reached.
- assert(0);
-
- /*
-    We allow a special form of dummy event when the table and cols
- are null and the table id is ~0UL. This is a temporary
- solution, to be able to terminate a started statement in the
- binary log: the extraneous events will be removed in the future.
- */
- DBUG_ASSERT((tbl_arg && tbl_arg->s && tid != ~0UL) ||
- (!tbl_arg && !cols && tid == ~0UL));
-
- if (thd_arg->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS)
- set_flags(NO_FOREIGN_KEY_CHECKS_F);
- if (thd_arg->variables.option_bits & OPTION_RELAXED_UNIQUE_CHECKS)
- set_flags(RELAXED_UNIQUE_CHECKS_F);
- /* if my_bitmap_init fails, caught in is_valid() */
- if (likely(!my_bitmap_init(&m_cols,
- m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
- m_width)))
- {
- /* Cols can be zero if this is a dummy binrows event */
- if (likely(cols != NULL))
- {
- memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols));
- create_last_word_mask(&m_cols);
- }
- }
- else
- {
- // Needed because my_bitmap_init() does not set it to null on failure
- m_cols.bitmap= 0;
- }
-}
-#endif
-
-
-Old_rows_log_event::Old_rows_log_event(const uchar *buf, uint event_len,
- Log_event_type event_type,
- const Format_description_log_event
- *description_event)
- : Log_event(buf, description_event),
- m_row_count(0),
-#ifndef MYSQL_CLIENT
- m_table(NULL),
-#endif
- m_table_id(0), m_rows_buf(0), m_rows_cur(0), m_rows_end(0)
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- , m_curr_row(NULL), m_curr_row_end(NULL), m_key(NULL)
-#endif
-{
- DBUG_ENTER("Old_rows_log_event::Old_Rows_log_event(const char*,...)");
- uint8 const common_header_len= description_event->common_header_len;
- uint8 const post_header_len= description_event->post_header_len[event_type-1];
-
- DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
- "post_header_len: %d",
- event_len, common_header_len,
- post_header_len));
-
- const uchar *post_start= buf + common_header_len;
- DBUG_DUMP("post_header", post_start, post_header_len);
- post_start+= RW_MAPID_OFFSET;
- if (post_header_len == 6)
- {
- /* Master is of an intermediate source tree before 5.1.4. Id is 4 bytes */
- m_table_id= uint4korr(post_start);
- post_start+= 4;
- }
- else
- {
- m_table_id= (ulong) uint6korr(post_start);
- post_start+= RW_FLAGS_OFFSET;
- }
-
- m_flags= uint2korr(post_start);
-
- uchar const *const var_start=
- (const uchar *)buf + common_header_len + post_header_len;
- uchar const *const ptr_width= var_start;
- uchar *ptr_after_width= (uchar*) ptr_width;
- DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
- m_width = net_field_length(&ptr_after_width);
- DBUG_PRINT("debug", ("m_width=%lu", m_width));
- /* Avoid reading out of buffer */
- if (ptr_after_width + m_width > (uchar *)buf + event_len)
- {
- m_cols.bitmap= NULL;
- DBUG_VOID_RETURN;
- }
-
- /* if my_bitmap_init fails, caught in is_valid() */
- if (likely(!my_bitmap_init(&m_cols,
- m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
- m_width)))
- {
- DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
- memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8);
- create_last_word_mask(&m_cols);
- ptr_after_width+= (m_width + 7) / 8;
- DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols));
- }
- else
- {
- // Needed because my_bitmap_init() does not set it to null on failure
- m_cols.bitmap= NULL;
- DBUG_VOID_RETURN;
- }
-
- const uchar* const ptr_rows_data= (const uchar*) ptr_after_width;
- size_t const data_size= event_len - (ptr_rows_data - (const uchar *) buf);
- DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %zu",
- m_table_id, m_flags, m_width, data_size));
- DBUG_DUMP("rows_data", (uchar*) ptr_rows_data, data_size);
-
- m_rows_buf= (uchar*) my_malloc(key_memory_log_event_old, data_size, MYF(MY_WME));
- if (likely((bool)m_rows_buf))
- {
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- m_curr_row= m_rows_buf;
-#endif
- m_rows_end= m_rows_buf + data_size;
- m_rows_cur= m_rows_end;
- memcpy(m_rows_buf, ptr_rows_data, data_size);
- }
- else
- m_cols.bitmap= 0; // to not free it
-
- DBUG_VOID_RETURN;
-}
-
-
-Old_rows_log_event::~Old_rows_log_event()
-{
- if (m_cols.bitmap == m_bitbuf) // no my_malloc happened
- m_cols.bitmap= 0; // so no my_free in my_bitmap_free
- my_bitmap_free(&m_cols); // To pair with my_bitmap_init().
- my_free(m_rows_buf);
-}
-
-
-int Old_rows_log_event::get_data_size()
-{
- uchar buf[MAX_INT_WIDTH];
- uchar *end= net_store_length(buf, (m_width + 7) / 8);
-
- DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
- return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) +
- m_rows_cur - m_rows_buf););
- int data_size= ROWS_HEADER_LEN;
- data_size+= no_bytes_in_map(&m_cols);
- data_size+= (uint) (end - buf);
-
- data_size+= (uint) (m_rows_cur - m_rows_buf);
- return data_size;
-}
-
-
-#ifndef MYSQL_CLIENT
-int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length)
-{
- /*
- When the table has a primary key, we would probably want, by default, to
- log only the primary key value instead of the entire "before image". This
- would save binlog space. TODO
- */
- DBUG_ENTER("Old_rows_log_event::do_add_row_data");
- DBUG_PRINT("enter", ("row_data: %p length: %zu",row_data,
- length));
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("row_data", row_data, MY_MIN(length, 32));
-#endif
-
- DBUG_ASSERT(m_rows_buf <= m_rows_cur);
- DBUG_ASSERT(!m_rows_buf || (m_rows_end && m_rows_buf < m_rows_end));
- DBUG_ASSERT(m_rows_cur <= m_rows_end);
-
- /* The cast will always work since m_rows_cur <= m_rows_end */
- if (static_cast<size_t>(m_rows_end - m_rows_cur) <= length)
- {
- size_t const block_size= 1024;
- my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf;
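-    /* Round the required size up to the next multiple of block_size */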
- my_ptrdiff_t const new_alloc=
- block_size * ((cur_size + length + block_size - 1) / block_size);
-
- uchar* const new_buf= (uchar*)my_realloc(key_memory_log_event_old, (uchar*)m_rows_buf, (uint) new_alloc,
- MYF(MY_ALLOW_ZERO_PTR|MY_WME));
- if (unlikely(!new_buf))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
-
- /* If the memory moved, we need to move the pointers */
- if (new_buf != m_rows_buf)
- {
- m_rows_buf= new_buf;
- m_rows_cur= m_rows_buf + cur_size;
- }
-
- /*
- The end pointer should always be changed to point to the end of
- the allocated memory.
- */
- m_rows_end= m_rows_buf + new_alloc;
- }
-
- DBUG_ASSERT(m_rows_cur + length <= m_rows_end);
- memcpy(m_rows_cur, row_data, length);
- m_rows_cur+= length;
- m_row_count++;
- DBUG_RETURN(0);
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
-{
- DBUG_ENTER("Old_rows_log_event::do_apply_event(Relay_log_info*)");
- int error= 0;
- Relay_log_info const *rli= rgi->rli;
-
- /*
- If m_table_id == ~0UL, then we have a dummy event that does not
- contain any data. In that case, we just remove all tables in the
- tables_to_lock list, close the thread tables, and return with
- success.
- */
- if (m_table_id == ~0UL)
- {
- /*
- This one is supposed to be set: just an extra check so that
- nothing strange has happened.
- */
- DBUG_ASSERT(get_flags(STMT_END_F));
-
- rgi->slave_close_thread_tables(thd);
- thd->clear_error();
- DBUG_RETURN(0);
- }
-
- /*
- 'thd' has been set by exec_relay_log_event(), just before calling
- do_apply_event(). We still check here to prevent future coding
- errors.
- */
- DBUG_ASSERT(rgi->thd == thd);
-
- /*
-    If no locks have been taken, this is the first binrow event seen
- after the table map events. We should then lock all the tables
- used in the transaction and proceed with execution of the actual
- event.
- */
- if (!thd->lock)
- {
- /*
- lock_tables() reads the contents of thd->lex, so they must be
-      initialized. Contrary to
-      Table_map_log_event::do_apply_event(), we don't call
- mysql_init_query() as that may reset the binlog format.
- */
- lex_start(thd);
-
- if (unlikely((error= lock_tables(thd, rgi->tables_to_lock,
- rgi->tables_to_lock_count, 0))))
- {
- if (thd->is_slave_error || thd->is_fatal_error)
- {
- /*
- Error reporting borrowed from Query_log_event with many excessive
- simplifications (we don't honour --slave-skip-errors)
- */
- uint actual_error= thd->net.last_errno;
- rli->report(ERROR_LEVEL, actual_error, NULL,
- "Error '%s' in %s event: when locking tables",
- (actual_error ? thd->net.last_error :
- "unexpected success or fatal error"),
- get_type_str());
- thd->is_fatal_error= 1;
- }
- else
- {
- rli->report(ERROR_LEVEL, error, NULL,
- "Error in %s event: when locking tables",
- get_type_str());
- }
- rgi->slave_close_thread_tables(thd);
- DBUG_RETURN(error);
- }
-
- /*
- When the open and locking succeeded, we check all tables to
- ensure that they still have the correct type.
- */
-
- {
- TABLE_LIST *table_list_ptr= rgi->tables_to_lock;
-      for (uint i= 0; table_list_ptr && (i < rgi->tables_to_lock_count);
- table_list_ptr= static_cast<RPL_TABLE_LIST*>(table_list_ptr->next_global), i++)
- {
- /*
-          Please see the comment in Rows_log_event::do_apply_event() in
-          log_event.cc for an explanation of the if condition below.
- */
- if (table_list_ptr->parent_l)
- continue;
- /*
- We can use a down cast here since we know that every table added
- to the tables_to_lock is a RPL_TABLE_LIST (or child table which is
- skipped above).
- */
- RPL_TABLE_LIST *ptr=static_cast<RPL_TABLE_LIST*>(table_list_ptr);
- TABLE *conv_table;
- if (ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
- {
- thd->is_slave_error= 1;
- rgi->slave_close_thread_tables(thd);
- DBUG_RETURN(ERR_BAD_TABLE_DEF);
- }
- ptr->m_conv_table= conv_table;
- }
- }
-
- /*
- ... and then we add all the tables to the table map but keep
- them in the tables to lock list.
-
- We also invalidate the query cache for all the tables, since
- they will now be changed.
-
- TODO [/Matz]: Maybe the query cache should not be invalidated
- here? It might be that a table is not changed, even though it
- was locked for the statement. We do know that each
-      Old_rows_log_event contains at least one row, so after processing one
- Old_rows_log_event, we can invalidate the query cache for the
- associated table.
- */
- for (TABLE_LIST *ptr= rgi->tables_to_lock ; ptr ; ptr= ptr->next_global)
- {
- rgi->m_table_map.set_table(ptr->table_id, ptr->table);
- }
-#ifdef HAVE_QUERY_CACHE
- query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
-#endif
- }
-
-  TABLE *table= m_table= rgi->m_table_map.get_table(m_table_id);
-
- if (table)
- {
- /*
- table == NULL means that this table should not be replicated
- (this was set up by Table_map_log_event::do_apply_event()
- which tested replicate-* rules).
- */
-
- /*
- It's not needed to set_time() but
- 1) it continues the property that "Time" in SHOW PROCESSLIST shows how
-      much the slave is behind
- 2) it will be needed when we allow replication from a table with no
- TIMESTAMP column to a table with one.
- So we call set_time(), like in SBR. Presently it changes nothing.
- */
- thd->set_time(when, when_sec_part);
- /*
- There are a few flags that are replicated with each row event.
- Make sure to set/clear them before executing the main body of
- the event.
- */
- if (get_flags(NO_FOREIGN_KEY_CHECKS_F))
- thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
- else
- thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
-
- if (get_flags(RELAXED_UNIQUE_CHECKS_F))
- thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
- else
- thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
- /* A small test to verify that objects have consistent types */
- DBUG_ASSERT(sizeof(thd->variables.option_bits) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
-
-    if (m_width == table->s->fields && bitmap_is_set_all(&m_cols))
- set_flags(COMPLETE_ROWS_F);
-
- /*
- Set tables write and read sets.
-
-      Read_set contains all slave columns (in case we are going to fetch
-      a complete record from the slave).
-
-      Write_set equals the m_cols bitmap sent from the master, but it can be
-      longer if the slave has extra columns.
- */
-
- DBUG_PRINT_BITSET("debug", "Setting table's write_set from: %s", &m_cols);
-
- bitmap_set_all(table->read_set);
- bitmap_set_all(table->write_set);
- if (!get_flags(COMPLETE_ROWS_F))
- bitmap_intersect(table->write_set,&m_cols);
- table->rpl_write_set= table->write_set;
-
- // Do event specific preparations
-
- error= do_before_row_operations(rli);
-
- // row processing loop
-
- while (error == 0 && m_curr_row < m_rows_end)
- {
- /* in_use can have been set to NULL in close_tables_for_reopen */
- THD* old_thd= table->in_use;
- if (!table->in_use)
- table->in_use= thd;
-
- error= do_exec_row(rgi);
-
- DBUG_PRINT("info", ("error: %d", error));
- DBUG_ASSERT(error != HA_ERR_RECORD_DELETED);
-
- table->in_use = old_thd;
- switch (error)
- {
- case 0:
- break;
-
- /* Some recoverable errors */
- case HA_ERR_RECORD_CHANGED:
- case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
- tuple does not exist */
- error= 0;
- break;
-
- default:
- rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
- "Error in %s event: row application failed. %s",
- get_type_str(), thd->net.last_error);
- thd->is_slave_error= 1;
- break;
- }
-
- /*
- If m_curr_row_end was not set during event execution (e.g., because
- of errors) we can't proceed to the next row. If the error is transient
- (i.e., error==0 at this point) we must call unpack_current_row() to set
- m_curr_row_end.
- */
-
- DBUG_PRINT("info", ("error: %d", error));
- DBUG_PRINT("info", ("curr_row: %p; curr_row_end:%p; rows_end: %p",
- m_curr_row, m_curr_row_end, m_rows_end));
-
- if (!m_curr_row_end && likely(!error))
- unpack_current_row(rgi);
-
- // at this moment m_curr_row_end should be set
- DBUG_ASSERT(error || m_curr_row_end != NULL);
- DBUG_ASSERT(error || m_curr_row < m_curr_row_end);
- DBUG_ASSERT(error || m_curr_row_end <= m_rows_end);
-
- m_curr_row= m_curr_row_end;
-
- } // row processing loop
-
- DBUG_EXECUTE_IF("stop_slave_middle_group",
- const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
- error= do_after_row_operations(rli, error);
- } // if (table)
-
- if (unlikely(error))
- { /* error has occurred during the transaction */
- rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
- "Error in %s event: error during transaction execution "
- "on table %s.%s. %s",
- get_type_str(), table->s->db.str,
- table->s->table_name.str,
- thd->net.last_error);
-
- /*
- If one day we honour --skip-slave-errors in row-based replication, and
- the error should be skipped, then we would clear mappings, rollback,
- close tables, but the slave SQL thread would not stop and then may
- assume the mapping is still available, the tables are still open...
- So then we should clear mappings/rollback/close here only if this is a
- STMT_END_F.
-      For now we code knowing that the error is not skippable, so the slave
-      SQL thread is certainly going to stop. The rollback is done at the
-      caller, along with SBR.
- */
- thd->reset_current_stmt_binlog_format_row();
- rgi->cleanup_context(thd, error);
- thd->is_slave_error= 1;
- DBUG_RETURN(error);
- }
-
- /*
- This code would ideally be placed in do_update_pos() instead, but
- since we have no access to table there, we do the setting of
- last_event_start_time here instead.
- */
- if (table && (table->s->primary_key == MAX_KEY) &&
- !use_trans_cache() && get_flags(STMT_END_F) == RLE_NO_FLAGS)
- {
- /*
- ------------ Temporary fix until WL#2975 is implemented ---------
-
- This event is not the last one (no STMT_END_F). If we stop now
- (in case of terminate_slave_thread()), how will we restart? We
- have to restart from Table_map_log_event, but as this table is
- not transactional, the rows already inserted will still be
- present, and idempotency is not guaranteed (no PK) so we risk
- that repeating leads to double insert. So we desperately try to
- continue, hope we'll eventually leave this buggy situation (by
- executing the final Old_rows_log_event). If we are in a hopeless
- wait (reached end of last relay log and nothing gets appended
- there), we timeout after one minute, and notify DBA about the
- problem. When WL#2975 is implemented, just remove the member
- Relay_log_info::last_event_start_time and all its occurrences.
- */
- rgi->last_event_start_time= my_time(0);
- }
-
- if (get_flags(STMT_END_F))
- {
- /*
- This is the end of a statement or transaction, so close (and
- unlock) the tables we opened when processing the
- Table_map_log_event starting the statement.
-
- OBSERVER. This will clear *all* mappings, not only those that
-      are open for the table. There is no good handle for on-close
- actions for tables.
-
- NOTE. Even if we have no table ('table' == 0) we still need to be
- here, so that we increase the group relay log position. If we didn't, we
- could have a group relay log position which lags behind "forever"
- (assume the last master's transaction is ignored by the slave because of
- replicate-ignore rules).
- */
- int binlog_error= thd->binlog_flush_pending_rows_event(TRUE);
-
- /*
- If this event is not in a transaction, the call below will, if some
- transactional storage engines are involved, commit the statement into
- them and flush the pending event to binlog.
- If this event is in a transaction, the call will do nothing, but a
- Xid_log_event will come next which will, if some transactional engines
- are involved, commit the transaction and flush the pending event to the
- binlog.
- If there was a deadlock the transaction should have been rolled back
- already. So there should be no need to rollback the transaction.
- */
- DBUG_ASSERT(! thd->transaction_rollback_request);
- if (unlikely((error= (binlog_error ?
- trans_rollback_stmt(thd) :
- trans_commit_stmt(thd)))))
- rli->report(ERROR_LEVEL, error, NULL,
- "Error in %s event: commit of row events failed, "
- "table `%s`.`%s`",
- get_type_str(), m_table->s->db.str,
- m_table->s->table_name.str);
- error|= binlog_error;
-
- /*
-      Now what if this is not a transactional engine? We still need to
- flush the pending event to the binlog; we did it with
- thd->binlog_flush_pending_rows_event(). Note that we imitate
- what is done for real queries: a call to
-      ha_autocommit_or_rollback() (sometimes only if it involves a
-      transactional engine), and a call to be sure to have the pending
- event flushed.
- */
-
- thd->reset_current_stmt_binlog_format_row();
- rgi->cleanup_context(thd, 0);
- }
-
- DBUG_RETURN(error);
-}
-
-
-Log_event::enum_skip_reason
-Old_rows_log_event::do_shall_skip(rpl_group_info *rgi)
-{
- /*
- If the slave skip counter is 1 and this event does not end a
- statement, then we should not start executing on the next event.
- Otherwise, we defer the decision to the normal skipping logic.
- */
- if (rgi->rli->slave_skip_counter == 1 && !get_flags(STMT_END_F))
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::do_shall_skip(rgi);
-}
-
-int
-Old_rows_log_event::do_update_pos(rpl_group_info *rgi)
-{
- Relay_log_info *rli= rgi->rli;
- int error= 0;
- DBUG_ENTER("Old_rows_log_event::do_update_pos");
-
- DBUG_PRINT("info", ("flags: %s",
- get_flags(STMT_END_F) ? "STMT_END_F " : ""));
-
- if (get_flags(STMT_END_F))
- {
- /*
- Indicate that a statement is finished.
- Step the group log position if we are not in a transaction,
- otherwise increase the event log position.
- */
- error= rli->stmt_done(log_pos, thd, rgi);
- /*
- Clear any errors in thd->net.last_err*. It is not known if this is
- needed or not. It is believed that any errors that may exist in
- thd->net.last_err* are allowed. Examples of errors are "key not
- found", which is produced in the test case rpl_row_conflicts.test
- */
- thd->clear_error();
- }
- else
- {
- rgi->inc_event_relay_log_pos();
- }
-
- DBUG_RETURN(error);
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifndef MYSQL_CLIENT
-bool Old_rows_log_event::write_data_header()
-{
- uchar buf[ROWS_HEADER_LEN]; // No need to init the buffer
-
- // This method should not be reached.
- assert(0);
-
- DBUG_ASSERT(m_table_id != ~0UL);
- DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
- {
- int4store(buf + 0, m_table_id);
- int2store(buf + 4, m_flags);
- return write_data(buf, 6);
- });
- int6store(buf + RW_MAPID_OFFSET, (ulonglong)m_table_id);
- int2store(buf + RW_FLAGS_OFFSET, m_flags);
- return write_data(buf, ROWS_HEADER_LEN);
-}
-
-
-bool Old_rows_log_event::write_data_body()
-{
- /*
- Note that this should be the number of *bits*, not the number of
- bytes.
- */
- uchar sbuf[MAX_INT_WIDTH];
- my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf;
-
- // This method should not be reached.
- assert(0);
-
- bool res= false;
- uchar *const sbuf_end= net_store_length(sbuf, (size_t) m_width);
- DBUG_ASSERT(static_cast<size_t>(sbuf_end - sbuf) <= sizeof(sbuf));
-
- DBUG_DUMP("m_width", sbuf, (size_t) (sbuf_end - sbuf));
- res= res || write_data(sbuf, (size_t) (sbuf_end - sbuf));
-
- DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols));
- res= res || write_data((uchar*)m_cols.bitmap, no_bytes_in_map(&m_cols));
- DBUG_DUMP("rows", m_rows_buf, data_size);
- res= res || write_data(m_rows_buf, (size_t) data_size);
-
- return res;
-
-}
-#endif
-
-
-#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-void Old_rows_log_event::pack_info(Protocol *protocol)
-{
- char buf[256];
- char const *const flagstr=
- get_flags(STMT_END_F) ? " flags: STMT_END_F" : "";
- size_t bytes= my_snprintf(buf, sizeof(buf),
- "table_id: %lu%s", m_table_id, flagstr);
- protocol->store(buf, bytes, &my_charset_bin);
-}
-#endif
-
-
-#ifdef MYSQL_CLIENT
-/* Method duplicates Rows_log_event's one */
-bool Old_rows_log_event::print_helper(FILE *file,
- PRINT_EVENT_INFO *print_event_info,
- char const *const name)
-{
- IO_CACHE *const head= &print_event_info->head_cache;
- IO_CACHE *const body= &print_event_info->body_cache;
- IO_CACHE *const tail= &print_event_info->tail_cache;
- bool do_print_encoded=
- print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS &&
- print_event_info->base64_output_mode != BASE64_OUTPUT_NEVER &&
- !print_event_info->short_form;
-
- if (!print_event_info->short_form)
- {
- if (print_header(head, print_event_info, !do_print_encoded) ||
- my_b_printf(head, "\t%s: table id %lu%s\n",
- name, m_table_id,
- do_print_encoded ? " flags: STMT_END_F" : "") ||
- print_base64(body, print_event_info, do_print_encoded))
- goto err;
- }
-
- if (get_flags(STMT_END_F))
- {
- if (copy_event_cache_to_file_and_reinit(head, file) ||
- copy_cache_to_file_wrapped(body, file, do_print_encoded,
- print_event_info->delimiter,
- print_event_info->verbose) ||
- copy_event_cache_to_file_and_reinit(tail, file))
- goto err;
- }
- return 0;
-err:
- return 1;
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-/**
- Write the current row into event's table.
-
- The row is located in the row buffer, pointed by @c m_curr_row member.
- Number of columns of the row is stored in @c m_width member (it can be
- different from the number of columns in the table to which we insert).
- Bitmap @c m_cols indicates which columns are present in the row. It is assumed
- that event's table is already open and pointed by @c m_table.
-
- If the same record already exists in the table it can be either overwritten
- or an error is reported depending on the value of @c overwrite flag
- (error reporting not yet implemented). Note that the matching record can be
- different from the row we insert if we use primary keys to identify records in
- the table.
-
- The row to be inserted can contain values only for selected columns. The
-  missing columns are filled with default values using the @c prepare_record()
-  function. If a matching record is found in the table and @c overwrite is
- true, the missing columns are taken from it.
-
-  @param rgi   Replication group info (needed for row unpacking).
- @param overwrite
- Shall we overwrite if the row already exists or signal
- error (currently ignored).
-
- @returns Error code on failure, 0 on success.
-
- This method, if successful, sets @c m_curr_row_end pointer to point at the
- next row in the rows buffer. This is done when unpacking the row to be
- inserted.
-
- @note If a matching record is found, it is either updated using
- @c ha_update_row() or first deleted and then new record written.
-*/
-
-int
-Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
-{
- DBUG_ENTER("write_row");
- DBUG_ASSERT(m_table != NULL && thd != NULL);
-
- TABLE *table= m_table; // pointer to event's table
- int error;
- int keynum;
- auto_afree_ptr<char> key(NULL);
-
- /* fill table->record[0] with default values */
-
- if (unlikely((error=
- prepare_record(table, m_width,
- TRUE /* check if columns have def. values */))))
- DBUG_RETURN(error);
-
- /* unpack row into table->record[0] */
- if ((error= unpack_current_row(rgi)))
- DBUG_RETURN(error);
-
-#ifndef DBUG_OFF
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);
- DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set);
-#endif
-
- /*
- Try to write record. If a corresponding record already exists in the table,
- we try to change it using ha_update_row() if possible. Otherwise we delete
- it and repeat the whole process again.
-
- TODO: Add safety measures against infinite looping.
- */
-
- while (unlikely(error= table->file->ha_write_row(table->record[0])))
- {
- if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
- {
- table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
- DBUG_RETURN(error);
- }
- if (unlikely((keynum= table->file->get_dup_key(error)) < 0))
- {
- DBUG_PRINT("info",("Can't locate duplicate key (get_dup_key returns %d)",keynum));
- table->file->print_error(error, MYF(0));
- /*
- We failed to retrieve the duplicate key
- - either because the error was not "duplicate key" error
-        - or because the information about which key was duplicated is not available
- */
- DBUG_RETURN(error);
- }
-
- /*
- We need to retrieve the old row into record[1] to be able to
- either update or delete the offending record. We either:
-
-      - use rnd_pos() with a row-id (available as dup_ref) to position at
-        the offending row, if that is possible (MyISAM and Blackhole), or else
-
- - use index_read_idx() with the key that is duplicated, to
- retrieve the offending row.
- */
- if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
- {
- DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
- error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("rnd_pos() returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
- else
- {
- DBUG_PRINT("info",("Locating offending record using index_read_idx()"));
-
- if (table->file->extra(HA_EXTRA_FLUSH_CACHE))
- {
- DBUG_PRINT("info",("Error when setting HA_EXTRA_FLUSH_CACHE"));
- DBUG_RETURN(my_errno);
- }
-
- if (key.get() == NULL)
- {
- key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
- if (unlikely(key.get() == NULL))
- {
- DBUG_PRINT("info",("Can't allocate key buffer"));
- DBUG_RETURN(ENOMEM);
- }
- }
-
- key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
- 0);
- error= table->file->ha_index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("index_read_idx() returns error %d", error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
-
- /*
- Now, record[1] should contain the offending row. That
- will enable us to update it or, alternatively, delete it (so
- that we can insert the new row afterwards).
- */
-
- /*
- If row is incomplete we will use the record found to fill
- missing columns.
- */
- if (!get_flags(COMPLETE_ROWS_F))
- {
- restore_record(table,record[1]);
- error= unpack_current_row(rgi);
- }
-
-#ifndef DBUG_OFF
- DBUG_PRINT("debug",("preparing for update: before and after image"));
- DBUG_DUMP("record[1] (before)", table->record[1], table->s->reclength);
- DBUG_DUMP("record[0] (after)", table->record[0], table->s->reclength);
-#endif
-
- /*
- REPLACE is defined as either INSERT or DELETE + INSERT. If
- possible, we can replace it with an UPDATE, but that will not
- work on InnoDB if FOREIGN KEY checks are necessary.
-
- I (Matz) am not sure of the reason for the last_uniq_key()
-      check, but I'm guessing that it's something along the
- following lines.
-
- Suppose that we got the duplicate key to be a key that is not
- the last unique key for the table and we perform an update:
- then there might be another key for which the unique check will
- fail, so we're better off just deleting the row and inserting
- the correct row.
- */
- if (last_uniq_key(table, keynum) &&
- !table->file->referenced_by_foreign_key())
- {
- DBUG_PRINT("info",("Updating row using ha_update_row()"));
- error=table->file->ha_update_row(table->record[1],
- table->record[0]);
- switch (error) {
-
- case HA_ERR_RECORD_IS_THE_SAME:
- DBUG_PRINT("info",("ignoring HA_ERR_RECORD_IS_THE_SAME error from"
- " ha_update_row()"));
- error= 0;
-
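-        /* fall through */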
- case 0:
- break;
-
- default:
- DBUG_PRINT("info",("ha_update_row() returns error %d",error));
- table->file->print_error(error, MYF(0));
- }
-
- DBUG_RETURN(error);
- }
- else
- {
- DBUG_PRINT("info",("Deleting offending row and trying to write new one again"));
- if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
- {
- DBUG_PRINT("info",("ha_delete_row() returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- /* Will retry ha_write_row() with the offending row removed. */
- }
- }
-
- DBUG_RETURN(error);
-}
-
-
-/**
- Locate the current row in event's table.
-
- The current row is pointed by @c m_curr_row. Member @c m_width tells how many
-  columns are there in the row (this can be different from the number of columns
- in the table). It is assumed that event's table is already open and pointed
- by @c m_table.
-
- If a corresponding record is found in the table it is stored in
- @c m_table->record[0]. Note that when record is located based on a primary
- key, it is possible that the record found differs from the row being located.
-
- If no key is specified or table does not have keys, a table scan is used to
- find the row. In that case the row should be complete and contain values for
- all columns. However, it can still be shorter than the table, i.e. the table
- can contain extra columns not present in the row. It is also possible that
- the table has fewer columns than the row being located.
-
- @returns Error code on failure, 0 on success.
-
- @post In case of success @c m_table->record[0] contains the record found.
- Also, the internal "cursor" of the table is positioned at the record found.
-
- @note If the engine allows random access of the records, a combination of
- @c position() and @c rnd_pos() will be used.
-
- Note that one MUST call ha_index_or_rnd_end() after this function if
- it returns 0 as we must leave the row position in the handler intact
- for any following update/delete command.
-*/
-
-int Old_rows_log_event::find_row(rpl_group_info *rgi)
-{
- DBUG_ENTER("find_row");
-
- DBUG_ASSERT(m_table && m_table->in_use != NULL);
-
- TABLE *table= m_table;
- int error;
-
- /* unpack row - missing fields get default values */
-
- // TODO: shall we check and report errors here?
- prepare_record(table, m_width, FALSE /* don't check errors */);
- error= unpack_current_row(rgi);
-
-#ifndef DBUG_OFF
- DBUG_PRINT("info",("looking for the following record"));
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
-#endif
-
- if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- table->s->primary_key < MAX_KEY)
- {
- /*
- Use a more efficient method to fetch the record given by
- table->record[0] if the engine allows it. We first compute a
- row reference using the position() member function (it will be
-    stored in table->file->ref) and then use rnd_pos() to position
- the "cursor" (i.e., record[0] in this case) at the correct row.
-
- TODO: Add a check that the correct record has been fetched by
- comparing with the original record. Take into account that the
- record on the master and slave can be of different
- length. Something along these lines should work:
-
- ADD>>> store_record(table,record[1]);
- int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
- ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
- table->s->reclength) == 0);
-
- */
- DBUG_PRINT("info",("locating record using primary key (position)"));
- int error= table->file->ha_rnd_pos_by_record(table->record[0]);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("rnd_pos returns error %d",error));
- table->file->print_error(error, MYF(0));
- }
- DBUG_RETURN(error);
- }
-
- // We can't use position() - try other methods.
-
- /*
- We need to retrieve all fields
- TODO: Move this out from this function to main loop
- */
- table->use_all_columns();
-
- /*
- Save copy of the record in table->record[1]. It might be needed
- later if linear search is used to find exact match.
- */
- store_record(table,record[1]);
-
- if (table->s->keys > 0)
- {
- DBUG_PRINT("info",("locating record using primary key (index_read)"));
-
- /* We have a key: search the table using the index */
- if (!table->file->inited &&
- unlikely(error= table->file->ha_index_init(0, FALSE)))
- {
- DBUG_PRINT("info",("ha_index_init returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
-
- /* Fill key data for the row */
-
- DBUG_ASSERT(m_key);
- key_copy(m_key, table->record[0], table->key_info, 0);
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("key data", m_key, table->key_info->key_length);
-#endif
-
- /*
- We need to set the null bytes to ensure that the filler bits are
- all set when returning. There are storage engines that just set
- the necessary bits on the bytes and don't set the filler bits
- correctly.
- */
- my_ptrdiff_t const pos=
- table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
- table->record[0][pos]= 0xFF;
-
- if (unlikely((error= table->file->ha_index_read_map(table->record[0],
- m_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))))
- {
- DBUG_PRINT("info",("no record matching the key found in the table"));
- table->file->print_error(error, MYF(0));
- table->file->ha_index_end();
- DBUG_RETURN(error);
- }
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_PRINT("info",("found first matching record"));
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
-#endif
- /*
- Below is a minor "optimization". If the key (i.e., key number
- 0) has the HA_NOSAME flag set, we know that we have found the
- correct record (since there can be no duplicates); otherwise, we
- have to compare the record with the one found to see if it is
- the correct one.
-
- CAVEAT! This behaviour is essential for the replication of,
- e.g., the mysql.proc table since the correct record *shall* be
- found using the primary key *only*. There shall be no
- comparison of non-PK columns to decide if the correct record is
- found. I can see no scenario where it would be incorrect to
- choose the row to change using only a PK or a UNNI.
- */
- if (table->key_info->flags & HA_NOSAME)
- {
- /* Unique key does not have a nullable part */
- if (!(table->key_info->flags & (HA_NULL_PART_KEY)))
- {
- DBUG_RETURN(0);
- }
- else
- {
- KEY *keyinfo= table->key_info;
- /*
- Unique key has a nullable part. We need to check if there is any
- field in the BI image that is null and part of the UNNI.
- */
- bool null_found= FALSE;
- for (uint i=0; i < keyinfo->user_defined_key_parts && !null_found; i++)
- {
- uint fieldnr= keyinfo->key_part[i].fieldnr - 1;
- Field **f= table->field+fieldnr;
- null_found= (*f)->is_null();
- }
-
- if (!null_found)
- {
- DBUG_RETURN(0);
- }
-
- /* else fall through to index scan */
- }
- }
-
- /*
- In case the key is not unique, we still have to iterate over the records found
- and find the one which is identical to the row given. A copy of the
- record we are looking for is stored in record[1].
- */
- DBUG_PRINT("info",("non-unique index, scanning it to find matching record"));
-
- while (record_compare(table))
- {
- while (unlikely(error= table->file->ha_index_next(table->record[0])))
- {
- DBUG_PRINT("info",("no record matching the given row found"));
- table->file->print_error(error, MYF(0));
- (void) table->file->ha_index_end();
- DBUG_RETURN(error);
- }
- }
- }
- else
- {
- DBUG_PRINT("info",("locating record using table scan (rnd_next)"));
-
- int restart_count= 0; // Number of times scanning has restarted from top
-
- /* We don't have a key: search the table using rnd_next() */
- if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
- {
- DBUG_PRINT("info",("error initializing table scan"
- " (ha_rnd_init returns %d)",error));
- DBUG_RETURN(error);
- }
-
- /* Continue until we find the right record or have made a full loop */
- do
- {
- restart_rnd_next:
- error= table->file->ha_rnd_next(table->record[0]);
-
- switch (error) {
-
- case 0:
- break;
-
- case HA_ERR_END_OF_FILE:
- if (++restart_count < 2)
- {
- int error2;
- table->file->ha_rnd_end();
- if (unlikely((error2= table->file->ha_rnd_init_with_error(1))))
- DBUG_RETURN(error2);
- goto restart_rnd_next;
- }
- break;
-
- default:
- DBUG_PRINT("info", ("Failed to get next record"
- " (rnd_next returns %d)",error));
- table->file->print_error(error, MYF(0));
- table->file->ha_rnd_end();
- DBUG_RETURN(error);
- }
- }
- while (restart_count < 2 && record_compare(table));
-
- /*
- Note: the above record_compare() takes into account all record fields,
- which might be incorrect in case a partial row was given in the event.
- */
-
- /*
- Have to restart the scan to be able to fetch the next row.
- */
- if (restart_count == 2)
- DBUG_PRINT("info", ("Record not found"));
- else
- DBUG_DUMP("record found", table->record[0], table->s->reclength);
- if (error)
- table->file->ha_rnd_end();
-
- DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
- DBUG_RETURN(error);
- }
-
- DBUG_RETURN(0);
-}
-
-#endif
-
-
-/**************************************************************************
- Write_rows_log_event member functions
-**************************************************************************/
-
-/*
- Constructor used to build an event for writing to the binary log.
- */
-#if !defined(MYSQL_CLIENT)
-Write_rows_log_event_old::Write_rows_log_event_old(THD *thd_arg,
- TABLE *tbl_arg,
- ulong tid_arg,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Old_rows_log_event(thd_arg, tbl_arg, tid_arg, cols, is_transactional)
-{
-
- // This constructor should not be reached.
- assert(0);
-
-}
-#endif
-
-
-/*
- Constructor used by slave to read the event from the binary log.
- */
-#ifdef HAVE_REPLICATION
-Write_rows_log_event_old::Write_rows_log_event_old(const uchar *buf,
- uint event_len,
- const Format_description_log_event
- *description_event)
-: Old_rows_log_event(buf, event_len, PRE_GA_WRITE_ROWS_EVENT,
- description_event)
-{
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-int
-Write_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const)
-{
- int error= 0;
-
- /*
- We are using REPLACE semantics and not INSERT IGNORE semantics
- when writing rows, that is: new rows replace old rows. We need to
- inform the storage engine that it should use this behaviour.
- */
-
- /* Tell the storage engine that we are using REPLACE semantics. */
- thd->lex->duplicates= DUP_REPLACE;
-
- thd->lex->sql_command= SQLCOM_REPLACE;
- /*
- Do not raise the error flag in case of hitting a unique attribute
- */
- m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
- m_table->file->ha_start_bulk_insert(0);
- return error;
-}
-
-
-int
-Write_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
- int error)
-{
- int local_error= 0;
- m_table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
- m_table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /*
- resetting the extra with
- table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
- fires bug#27077
- todo: explain or fix
- */
- if (unlikely((local_error= m_table->file->ha_end_bulk_insert())))
- {
- m_table->file->print_error(local_error, MYF(0));
- }
- return error? error : local_error;
-}
-
-
-int
-Write_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
-{
- DBUG_ASSERT(m_table != NULL);
- int error= write_row(rgi, TRUE /* overwrite */);
-
- if (unlikely(error) && !thd->net.last_errno)
- thd->net.last_errno= error;
-
- return error;
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifdef MYSQL_CLIENT
-bool Write_rows_log_event_old::print(FILE *file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return Old_rows_log_event::print_helper(file, print_event_info,
- "Write_rows_old");
-}
-#endif
-
-
-/**************************************************************************
- Delete_rows_log_event member functions
-**************************************************************************/
-
-/*
- Constructor used to build an event for writing to the binary log.
- */
-
-#ifndef MYSQL_CLIENT
-Delete_rows_log_event_old::Delete_rows_log_event_old(THD *thd_arg,
- TABLE *tbl_arg,
- ulong tid,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Old_rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional),
- m_after_image(NULL), m_memory(NULL)
-{
-
- // This constructor should not be reached.
- assert(0);
-
-}
-#endif /* #if !defined(MYSQL_CLIENT) */
-
-
-/*
- Constructor used by slave to read the event from the binary log.
- */
-#ifdef HAVE_REPLICATION
-Delete_rows_log_event_old::
-Delete_rows_log_event_old(const uchar *buf,
- uint event_len,
- const Format_description_log_event
- *description_event)
- :Old_rows_log_event(buf, event_len, PRE_GA_DELETE_ROWS_EVENT,
- description_event),
- m_after_image(NULL), m_memory(NULL)
-{
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-int Delete_rows_log_event_old::
-do_before_row_operations(const Slave_reporting_capability *const)
-{
- if ((m_table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- m_table->s->primary_key < MAX_KEY)
- {
- /*
- We don't need to allocate any memory for m_key since it is not used.
- */
- return 0;
- }
-
- if (m_table->s->keys > 0)
- {
- // Allocate buffer for key searches
- m_key= (uchar*)my_malloc(key_memory_log_event_old, m_table->key_info->key_length, MYF(MY_WME));
- if (!m_key)
- return HA_ERR_OUT_OF_MEM;
- }
- return 0;
-}
-
-
-int
-Delete_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
- int error)
-{
- /* error= TODO: find out what this should really be; this triggers close_scan in NDB, returning an error? */
- m_table->file->ha_index_or_rnd_end();
- my_free(m_key);
- m_key= NULL;
-
- return error;
-}
-
-
-int Delete_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
-{
- int error;
- DBUG_ASSERT(m_table != NULL);
-
- if (likely(!(error= find_row(rgi))) )
- {
- /*
- Delete the record found, located in record[0]
- */
- error= m_table->file->ha_delete_row(m_table->record[0]);
- m_table->file->ha_index_or_rnd_end();
- }
- return error;
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifdef MYSQL_CLIENT
-bool Delete_rows_log_event_old::print(FILE *file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return Old_rows_log_event::print_helper(file, print_event_info,
- "Delete_rows_old");
-}
-#endif
-
-
-/**************************************************************************
- Update_rows_log_event member functions
-**************************************************************************/
-
-/*
- Constructor used to build an event for writing to the binary log.
- */
-#if !defined(MYSQL_CLIENT)
-Update_rows_log_event_old::Update_rows_log_event_old(THD *thd_arg,
- TABLE *tbl_arg,
- ulong tid,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Old_rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional),
- m_after_image(NULL), m_memory(NULL)
-{
-
- // This constructor should not be reached.
- assert(0);
-}
-#endif /* !defined(MYSQL_CLIENT) */
-
-
-/*
- Constructor used by slave to read the event from the binary log.
- */
-#ifdef HAVE_REPLICATION
-Update_rows_log_event_old::Update_rows_log_event_old(const uchar *buf,
- uint event_len,
- const
- Format_description_log_event
- *description_event)
- : Old_rows_log_event(buf, event_len, PRE_GA_UPDATE_ROWS_EVENT,
- description_event),
- m_after_image(NULL), m_memory(NULL)
-{
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-int
-Update_rows_log_event_old::
-do_before_row_operations(const Slave_reporting_capability *const)
-{
- if (m_table->s->keys > 0)
- {
- // Allocate buffer for key searches
- m_key= (uchar*)my_malloc(key_memory_log_event_old,
- m_table->key_info->key_length, MYF(MY_WME));
- if (!m_key)
- return HA_ERR_OUT_OF_MEM;
- }
-
- return 0;
-}
-
-
-int
-Update_rows_log_event_old::
-do_after_row_operations(const Slave_reporting_capability *const, int error)
-{
- /* error= TODO: find out what this should really be; this triggers close_scan in NDB, returning an error? */
- m_table->file->ha_index_or_rnd_end();
- my_free(m_key); // Free for multi_malloc
- m_key= NULL;
-
- return error;
-}
-
-
-int
-Update_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
-{
- DBUG_ASSERT(m_table != NULL);
-
- int error= find_row(rgi);
- if (unlikely(error))
- {
- /*
- We need to read the second image in the event of error to be
- able to skip to the next pair of updates
- */
- m_curr_row= m_curr_row_end;
- unpack_current_row(rgi);
- return error;
- }
-
- /*
- This is the situation after locating BI:
-
-   ===|=== before image ====|=== after image ===|===
-          ^                     ^
-          m_curr_row            m_curr_row_end
-
- BI found in the table is stored in record[0]. We copy it to record[1]
- and unpack AI to record[0].
- */
-
- store_record(m_table,record[1]);
-
- m_curr_row= m_curr_row_end;
- error= unpack_current_row(rgi); // this also updates m_curr_row_end
-
- /*
- Now we have the right row to update. The old row (the one we're
- looking for) is in record[1] and the new row is in record[0].
- */
-#ifndef HAVE_valgrind
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
- DBUG_PRINT("info",("Updating row in table"));
- DBUG_DUMP("old record", m_table->record[1], m_table->s->reclength);
- DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength);
-#endif
-
- error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]);
- m_table->file->ha_index_or_rnd_end();
-
- if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
- error= 0;
-
- return error;
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifdef MYSQL_CLIENT
-bool Update_rows_log_event_old::print(FILE *file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return Old_rows_log_event::print_helper(file, print_event_info,
- "Update_rows_old");
-}
-#endif
diff --git a/sql/log_event_old.h b/sql/log_event_old.h
deleted file mode 100644
index e5aaacec209..00000000000
--- a/sql/log_event_old.h
+++ /dev/null
@@ -1,569 +0,0 @@
-/* Copyright (c) 2007, 2013, Oracle and/or its affiliates.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#ifndef LOG_EVENT_OLD_H
-#define LOG_EVENT_OLD_H
-
-/*
- Need to include this file at the proper position of log_event.h
- */
-
-
-/**
- @file
-
- @brief This file contains classes handling old formats of row-based
- binlog events.
-*/
-/*
- Around 2007-10-31, I made these classes completely separate from
- the new classes (before, there was a complex class hierarchy
- involving multiple inheritance; see BUG#31581), by simply copying
- and pasting the entire contents of Rows_log_event into
- Old_rows_log_event and the entire contents of
- {Write|Update|Delete}_rows_log_event into
- {Write|Update|Delete}_rows_log_event_old. For clarity, I will keep
- the comments marking which code was cut-and-pasted for some time.
- With the classes collapsed into one, there is probably some
- redundancy (maybe some methods can be simplified and/or removed),
- but we keep them this way for now. /Sven
-*/
-
-/* These classes are based on the v1 RowsHeaderLen */
-#undef ROWS_HEADER_LEN
-#define ROWS_HEADER_LEN ROWS_HEADER_LEN_V1
-
-/**
- @class Old_rows_log_event
-
- Base class for the three types of row-based events
- {Write|Update|Delete}_row_log_event_old, with event type codes
- PRE_GA_{WRITE|UPDATE|DELETE}_ROWS_EVENT. These events are never
- created any more, except when reading a relay log created by an old
- server.
-*/
-class Old_rows_log_event : public Log_event
-{
- /********** BEGIN CUT & PASTE FROM Rows_log_event **********/
-public:
- /**
- Enumeration of the errors that can be returned.
- */
- enum enum_error
- {
- ERR_OPEN_FAILURE = -1, /**< Failure to open table */
- ERR_OK = 0, /**< No error */
- ERR_TABLE_LIMIT_EXCEEDED = 1, /**< No more room for tables */
- ERR_OUT_OF_MEM = 2, /**< Out of memory */
- ERR_BAD_TABLE_DEF = 3, /**< Table definition does not match */
- ERR_RBR_TO_SBR = 4 /**< daisy-chaining RBR to SBR not allowed */
- };
-
- /*
- These definitions allow you to combine the flags into an
- appropriate flag set using the normal bitwise operators. The
- implicit conversion from an enum-constant to an integer is
- accepted by the compiler, which is then used to set the real set
- of flags.
- */
- enum enum_flag
- {
- /* Last event of a statement */
- STMT_END_F = (1U << 0),
-
- /* Value of the OPTION_NO_FOREIGN_KEY_CHECKS flag in thd->options */
- NO_FOREIGN_KEY_CHECKS_F = (1U << 1),
-
- /* Value of the OPTION_RELAXED_UNIQUE_CHECKS flag in thd->options */
- RELAXED_UNIQUE_CHECKS_F = (1U << 2),
-
- /**
- Indicates that rows in this event are complete, that is, they contain
- values for all columns of the table.
- */
- COMPLETE_ROWS_F = (1U << 3)
- };
-
- typedef uint16 flag_set;
-
- /* Special constants representing sets of flags */
- enum
- {
- RLE_NO_FLAGS = 0U
- };
-
- virtual ~Old_rows_log_event();
-
- void set_flags(flag_set flags_arg) { m_flags |= flags_arg; }
- void clear_flags(flag_set flags_arg) { m_flags &= ~flags_arg; }
- flag_set get_flags(flag_set flags_arg) const { return m_flags & flags_arg; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual void pack_info(Protocol *protocol);
-#endif
-
-#ifdef MYSQL_CLIENT
- /* not for direct call, each derived has its own ::print() */
- virtual bool print(FILE *file, PRINT_EVENT_INFO *print_event_info)= 0;
-#endif
-
-#ifndef MYSQL_CLIENT
- int add_row_data(uchar *data, size_t length)
- {
- return do_add_row_data(data,length);
- }
-#endif
-
- /* Member functions to implement superclass interface */
- virtual int get_data_size();
-
- MY_BITMAP const *get_cols() const { return &m_cols; }
- size_t get_width() const { return m_width; }
- ulong get_table_id() const { return m_table_id; }
-
-#ifndef MYSQL_CLIENT
- virtual bool write_data_header();
- virtual bool write_data_body();
- virtual const char *get_db() { return m_table->s->db.str; }
-#endif
- /*
- Check that malloc() succeeded in allocating memory for the rows
- buffer and the COLS vector. Checking that an Update_rows_log_event_old
- is valid is done in the Update_rows_log_event_old::is_valid()
- function.
- */
- virtual bool is_valid() const
- {
- return m_rows_buf && m_cols.bitmap;
- }
- bool is_part_of_group() { return 1; }
-
- uint m_row_count; /* The number of rows added to the event */
-
-protected:
- /*
- The constructors are protected since you're supposed to inherit
- this class, not create instances of this class.
- */
-#ifndef MYSQL_CLIENT
- Old_rows_log_event(THD*, TABLE*, ulong table_id,
- MY_BITMAP const *cols, bool is_transactional);
-#endif
- Old_rows_log_event(const uchar *row_data, uint event_len,
- Log_event_type event_type,
- const Format_description_log_event *description_event);
-
-#ifdef MYSQL_CLIENT
- bool print_helper(FILE *, PRINT_EVENT_INFO *, char const *const name);
-#endif
-
-#ifndef MYSQL_CLIENT
- virtual int do_add_row_data(uchar *data, size_t length);
-#endif
-
-#ifndef MYSQL_CLIENT
- TABLE *m_table; /* The table the rows belong to */
-#endif
- ulong m_table_id; /* Table ID */
- MY_BITMAP m_cols; /* Bitmap denoting columns available */
- ulong m_width; /* The width of the columns bitmap */
-
- ulong m_master_reclength; /* Length of record on master side */
-
- /* Bit buffers in the same memory as the class */
- uint32 m_bitbuf[128/(sizeof(uint32)*8)];
- uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)];
-
- uchar *m_rows_buf; /* The rows in packed format */
- uchar *m_rows_cur; /* One-after the end of the data */
- uchar *m_rows_end; /* One-after the end of the allocated space */
-
- flag_set m_flags; /* Flags for row-level events */
-
- /* helper functions */
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- const uchar *m_curr_row; /* Start of the row being processed */
- const uchar *m_curr_row_end; /* One-after the end of the current row */
- uchar *m_key; /* Buffer to keep key value during searches */
-
- int find_row(rpl_group_info *);
- int write_row(rpl_group_info *, const bool);
-
- // Unpack the current row into m_table->record[0]
- int unpack_current_row(rpl_group_info *rgi)
- {
- DBUG_ASSERT(m_table);
- ASSERT_OR_RETURN_ERROR(m_curr_row < m_rows_end, HA_ERR_CORRUPT_EVENT);
- return ::unpack_row(rgi, m_table, m_width, m_curr_row, &m_cols,
- &m_curr_row_end, &m_master_reclength, m_rows_end);
- }
-#endif
-
-private:
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
- virtual int do_update_pos(rpl_group_info *rgi);
- virtual enum_skip_reason do_shall_skip(rpl_group_info *rgi);
-
- /*
- Primitive to prepare for a sequence of row executions.
-
- DESCRIPTION
-
- Before doing a sequence of do_prepare_row() and do_exec_row()
- calls, this member function should be called to prepare for the
- entire sequence. Typically, this member function will allocate
- space for any buffers that are needed for the two member
- functions mentioned above.
-
- RETURN VALUE
-
- The member function will return 0 if all went OK, or a non-zero
- error code otherwise.
- */
- virtual
- int do_before_row_operations(const Slave_reporting_capability *const log) = 0;
-
- /*
- Primitive to clean up after a sequence of row executions.
-
- DESCRIPTION
-
- After doing a sequence of do_prepare_row() and do_exec_row(),
- this member function should be called to clean up and release
- any allocated buffers.
-
- The error argument, if non-zero, indicates an error which happened during
- row processing before this function was called. In this case, even if
- this function is successful, it should return the error code given in the argument.
- */
- virtual
- int do_after_row_operations(const Slave_reporting_capability *const log,
- int error) = 0;
-
- /*
- Primitive to do the actual execution necessary for a row.
-
- DESCRIPTION
- The member function will do the actual execution needed to handle a row.
- The row is located at m_curr_row. When the function returns,
- m_curr_row_end should point at the next row (one byte after the end
- of the current row).
-
- RETURN VALUE
- 0 if execution succeeded, 1 if execution failed.
-
- */
- virtual int do_exec_row(rpl_group_info *rgi) = 0;
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
- /********** END OF CUT & PASTE FROM Rows_log_event **********/
- protected:
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
- int do_apply_event(Old_rows_log_event*, rpl_group_info *rgi);
-
- /*
- Primitive to prepare for a sequence of row executions.
-
- DESCRIPTION
-
- Before doing a sequence of do_prepare_row() and do_exec_row()
- calls, this member function should be called to prepare for the
- entire sequence. Typically, this member function will allocate
- space for any buffers that are needed for the two member
- functions mentioned above.
-
- RETURN VALUE
-
- The member function will return 0 if all went OK, or a non-zero
- error code otherwise.
- */
- virtual int do_before_row_operations(TABLE *table) = 0;
-
- /*
- Primitive to clean up after a sequence of row executions.
-
- DESCRIPTION
-
- After doing a sequence of do_prepare_row() and do_exec_row(),
- this member function should be called to clean up and release
- any allocated buffers.
- */
- virtual int do_after_row_operations(TABLE *table, int error) = 0;
-
- /*
- Primitive to prepare for handling one row in a row-level event.
-
- DESCRIPTION
-
- The member function prepares for execution of operations needed for one
- row in a row-level event by reading up data from the buffer containing
- the row. No specific interpretation of the data is normally done here,
- since SQL thread specific data is not available: that data is made
- available for the do_exec function.
-
- On return, *row_end points to the start of the next row, or is set to NULL
- if the preparation failed. Currently, preparation cannot fail, but don't
- rely on this behavior.
-
- RETURN VALUE
- Error code, if something went wrong, 0 otherwise.
- */
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start,
- uchar const **row_end) = 0;
-
- /*
- Primitive to do the actual execution necessary for a row.
-
- DESCRIPTION
- The member function will do the actual execution needed to handle a row.
-
- RETURN VALUE
- 0 if execution succeeded, 1 if execution failed.
-
- */
- virtual int do_exec_row(TABLE *table) = 0;
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-};
-
-
-/**
- @class Write_rows_log_event_old
-
- Old class for binlog events that write new rows to a table (event
- type code PRE_GA_WRITE_ROWS_EVENT). Such events are never produced
- by this version of the server, but they may be read from a relay log
- created by an old server. New servers create events of class
- Write_rows_log_event (event type code WRITE_ROWS_EVENT) instead.
-*/
-class Write_rows_log_event_old : public Old_rows_log_event
-{
- /********** BEGIN CUT & PASTE FROM Write_rows_log_event **********/
-public:
-#if !defined(MYSQL_CLIENT)
- Write_rows_log_event_old(THD*, TABLE*, ulong table_id,
- MY_BITMAP const *cols, bool is_transactional);
-#endif
-#ifdef HAVE_REPLICATION
- Write_rows_log_event_old(const uchar *buf, uint event_len,
- const Format_description_log_event *description_event);
-#endif
-#if !defined(MYSQL_CLIENT)
- static bool binlog_row_logging_function(THD *thd, TABLE *table,
- bool is_transactional,
- const uchar *before_record
- __attribute__((unused)),
- const uchar *after_record)
- {
- return thd->binlog_write_row(table, is_transactional, after_record);
- }
-#endif
-
-private:
-#ifdef MYSQL_CLIENT
- bool print(FILE *file, PRINT_EVENT_INFO *print_event_info);
-#endif
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_before_row_operations(const Slave_reporting_capability *const);
- virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
- virtual int do_exec_row(rpl_group_info *);
-#endif
- /********** END OF CUT & PASTE FROM Write_rows_log_event **********/
-
-public:
- enum
- {
- /* Support interface to THD::binlog_prepare_pending_rows_event */
- TYPE_CODE = PRE_GA_WRITE_ROWS_EVENT
- };
-
-private:
- virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- // use old definition of do_apply_event()
- virtual int do_apply_event(rpl_group_info *rgi)
- { return Old_rows_log_event::do_apply_event(this, rgi); }
-
- // primitives for old version of do_apply_event()
- virtual int do_before_row_operations(TABLE *table);
- virtual int do_after_row_operations(TABLE *table, int error);
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start, uchar const **row_end);
- virtual int do_exec_row(TABLE *table);
-
-#endif
-};
-
-
-/**
- @class Update_rows_log_event_old
-
- Old class for binlog events that modify existing rows to a table
- (event type code PRE_GA_UPDATE_ROWS_EVENT). Such events are never
- produced by this version of the server, but they may be read from a
- relay log created by an old server. New servers create events of
- class Update_rows_log_event (event type code UPDATE_ROWS_EVENT)
- instead.
-*/
-class Update_rows_log_event_old : public Old_rows_log_event
-{
- /********** BEGIN CUT & PASTE FROM Update_rows_log_event **********/
-public:
-#ifndef MYSQL_CLIENT
- Update_rows_log_event_old(THD*, TABLE*, ulong table_id,
- MY_BITMAP const *cols,
- bool is_transactional);
-#endif
-
-#ifdef HAVE_REPLICATION
- Update_rows_log_event_old(const uchar *buf, uint event_len,
- const Format_description_log_event *description_event);
-#endif
-
-#if !defined(MYSQL_CLIENT)
- static bool binlog_row_logging_function(THD *thd, TABLE *table,
- bool is_transactional,
- MY_BITMAP *cols,
- uint fields,
- const uchar *before_record,
- const uchar *after_record)
- {
- return thd->binlog_update_row(table, is_transactional,
- before_record, after_record);
- }
-#endif
-
-protected:
-#ifdef MYSQL_CLIENT
- bool print(FILE *file, PRINT_EVENT_INFO *print_event_info);
-#endif
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_before_row_operations(const Slave_reporting_capability *const);
- virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
- virtual int do_exec_row(rpl_group_info *);
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
- /********** END OF CUT & PASTE FROM Update_rows_log_event **********/
-
- uchar *m_after_image, *m_memory;
-
-public:
- enum
- {
- /* Support interface to THD::binlog_prepare_pending_rows_event */
- TYPE_CODE = PRE_GA_UPDATE_ROWS_EVENT
- };
-
-private:
- virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- // use old definition of do_apply_event()
- virtual int do_apply_event(rpl_group_info *rgi)
- { return Old_rows_log_event::do_apply_event(this, rgi); }
-
- // primitives for old version of do_apply_event()
- virtual int do_before_row_operations(TABLE *table);
- virtual int do_after_row_operations(TABLE *table, int error);
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start, uchar const **row_end);
- virtual int do_exec_row(TABLE *table);
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-};
-
-
-/**
- @class Delete_rows_log_event_old
-
- Old class for binlog events that delete existing rows from a table
- (event type code PRE_GA_DELETE_ROWS_EVENT). Such events are never
- produced by this version of the server, but they may be read from a
- relay log created by an old server. New servers create events of
- class Delete_rows_log_event (event type code DELETE_ROWS_EVENT)
- instead.
-*/
-class Delete_rows_log_event_old : public Old_rows_log_event
-{
- /********** BEGIN CUT & PASTE FROM Update_rows_log_event **********/
-public:
-#ifndef MYSQL_CLIENT
- Delete_rows_log_event_old(THD*, TABLE*, ulong,
- MY_BITMAP const *cols, bool is_transactional);
-#endif
-#ifdef HAVE_REPLICATION
- Delete_rows_log_event_old(const uchar *buf, uint event_len,
- const Format_description_log_event *description_event);
-#endif
-#if !defined(MYSQL_CLIENT)
- static bool binlog_row_logging_function(THD *thd, TABLE *table,
- bool is_transactional,
- MY_BITMAP *cols,
- uint fields,
- const uchar *before_record,
- const uchar *after_record
- __attribute__((unused)))
- {
- return thd->binlog_delete_row(table, is_transactional, before_record);
- }
-#endif
-
-protected:
-#ifdef MYSQL_CLIENT
- bool print(FILE *file, PRINT_EVENT_INFO *print_event_info);
-#endif
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_before_row_operations(const Slave_reporting_capability *const);
- virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
- virtual int do_exec_row(rpl_group_info *);
-#endif
- /********** END CUT & PASTE FROM Delete_rows_log_event **********/
-
- uchar *m_after_image, *m_memory;
-
-public:
- enum
- {
- /* Support interface to THD::binlog_prepare_pending_rows_event */
- TYPE_CODE = PRE_GA_DELETE_ROWS_EVENT
- };
-
-private:
- virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- // use old definition of do_apply_event()
- virtual int do_apply_event(rpl_group_info *rgi)
- { return Old_rows_log_event::do_apply_event(this, rgi); }
-
- // primitives for old version of do_apply_event()
- virtual int do_before_row_operations(TABLE *table);
- virtual int do_after_row_operations(TABLE *table, int error);
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start, uchar const **row_end);
- virtual int do_exec_row(TABLE *table);
-#endif
-};
-
-
-#endif
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 535a2cf93de..3910d910da1 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -354,37 +354,6 @@ inline bool unexpected_error_code(int unexpected_error)
}
}
-/*
- pretty_print_str()
-*/
-
-static void
-pretty_print_str(String *packet, const char *str, int len)
-{
- const char *end= str + len;
- packet->append(STRING_WITH_LEN("'"));
- while (str < end)
- {
- char c;
- switch ((c=*str++)) {
- case '\n': packet->append(STRING_WITH_LEN("\\n")); break;
- case '\r': packet->append(STRING_WITH_LEN("\\r")); break;
- case '\\': packet->append(STRING_WITH_LEN("\\\\")); break;
- case '\b': packet->append(STRING_WITH_LEN("\\b")); break;
- case '\t': packet->append(STRING_WITH_LEN("\\t")); break;
- case '\'': packet->append(STRING_WITH_LEN("\\'")); break;
- case 0 : packet->append(STRING_WITH_LEN("\\0")); break;
- default:
- packet->append(&c, 1);
- break;
- }
- }
- packet->append(STRING_WITH_LEN("'"));
-}
-#endif /* HAVE_REPLICATION */
-
-
-#if defined(HAVE_REPLICATION)
/**
Create a prefix for the temporary files that is to be used for
@@ -607,29 +576,17 @@ int Log_event::do_update_pos(rpl_group_info *rgi)
Relay_log_info *rli= rgi->rli;
DBUG_ENTER("Log_event::do_update_pos");
+ DBUG_ASSERT(rli);
DBUG_ASSERT(!rli->belongs_to_client());
+
/*
- rli is null when (as far as I (Guilhem) know) the caller is
- Load_log_event::do_apply_event *and* that one is called from
- Execute_load_log_event::do_apply_event. In this case, we don't
- do anything here ; Execute_load_log_event::do_apply_event will
- call Log_event::do_apply_event again later with the proper rli.
- Strictly speaking, if we were sure that rli is null only in the
- case discussed above, 'if (rli)' is useless here. But as we are
- not 100% sure, keep it for now.
-
- Matz: I don't think we will need this check with this refactoring.
+ In parallel execution, delay position update for the events that are
+ not part of event groups (format description, rotate, and such) until
+ the actual event execution reaches that point.
*/
- if (rli)
- {
- /*
- In parallel execution, delay position update for the events that are
- not part of event groups (format description, rotate, and such) until
- the actual event execution reaches that point.
- */
- if (!rgi->is_parallel_exec || is_group_event(get_type_code()))
- rli->stmt_done(log_pos, thd, rgi);
- }
+ if (!rgi->is_parallel_exec || is_group_event(get_type_code()))
+ rli->stmt_done(log_pos, thd, rgi);
+
DBUG_RETURN(0); // Cannot fail currently
}
@@ -1227,18 +1184,6 @@ bool Query_log_event::write()
int8store(start, table_map_for_update);
start+= 8;
}
- if (master_data_written != 0)
- {
- /*
- Q_MASTER_DATA_WRITTEN_CODE only exists in relay logs where the master
- has binlog_version<4 and the slave has binlog_version=4. See comment
- for master_data_written in log_event.h for details.
- */
- *start++= Q_MASTER_DATA_WRITTEN_CODE;
- int4store(start, master_data_written);
- start+= 4;
- }
-
if (thd && thd->need_binlog_invoker())
{
LEX_CSTRING user;
@@ -1434,7 +1379,6 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
lc_time_names_number(thd_arg->variables.lc_time_names->number),
charset_database_number(0),
table_map_for_update((ulonglong)thd_arg->table_map_for_update),
- master_data_written(0),
gtid_flags_extra(thd_arg->get_binlog_flags_for_alter()),
sa_seq_no(0)
{
@@ -2275,7 +2219,7 @@ compare_errors:
expected_error,
actual_error ? thd->get_stmt_da()->message() : "no error",
actual_error,
- print_slave_db_safe(db), query_arg);
+ safe_str(db), query_arg);
thd->is_slave_error= 1;
}
/*
@@ -2465,23 +2409,11 @@ Query_log_event::peek_is_commit_rollback(const uchar *event_start,
!memcmp(event_start + (event_len-9), "\0ROLLBACK", 9);
}
-#endif
-
-
-/**************************************************************************
- Start_log_event_v3 methods
-**************************************************************************/
-
-Start_log_event_v3::Start_log_event_v3()
- :Log_event(), created(0), binlog_version(BINLOG_VERSION),
- dont_set_created(0)
-{
- memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
-}
-
+/***************************************************************************
+ Format_description_log_event methods
+****************************************************************************/
-#if defined(HAVE_REPLICATION)
-void Start_log_event_v3::pack_info(Protocol *protocol)
+void Format_description_log_event::pack_info(Protocol *protocol)
{
char buf[12 + ST_SERVER_VER_LEN + 14 + 22], *pos;
pos= strmov(buf, "Server ver: ");
@@ -2490,115 +2422,14 @@ void Start_log_event_v3::pack_info(Protocol *protocol)
pos= int10_to_str(binlog_version, pos, 10);
protocol->store(buf, (uint) (pos-buf), &my_charset_bin);
}
-#endif
-
-
-bool Start_log_event_v3::write()
-{
- char buff[START_V3_HEADER_LEN];
- int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
- memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
- if (!dont_set_created)
- created= get_time(); // this sets when and when_sec_part as a side effect
- int4store(buff + ST_CREATED_OFFSET,created);
- return write_header(sizeof(buff)) ||
- write_data(buff, sizeof(buff)) ||
- write_footer();
-}
-
-
-#if defined(HAVE_REPLICATION)
-
-/**
- Start_log_event_v3::do_apply_event() .
- The master started
-
- IMPLEMENTATION
- - To handle the case where the master died without having time to write
- DROP TEMPORARY TABLE, DO RELEASE_LOCK (prepared statements' deletion is
- TODO), we clean up all temporary tables that we got, if we are sure we
- can (see below).
-
- @todo
- - Remove all active user locks.
- Guilhem 2003-06: this is true but not urgent: the worst it can cause is
- the use of a bit of memory for a user lock which will not be used
- anymore. If the user lock is later used, the old one will be released. In
- other words, no deadlock problem.
-*/
-
-int Start_log_event_v3::do_apply_event(rpl_group_info *rgi)
-{
- DBUG_ENTER("Start_log_event_v3::do_apply_event");
- int error= 0;
- Relay_log_info *rli= rgi->rli;
-
- switch (binlog_version)
- {
- case 3:
- case 4:
- /*
- This can either be 4.x (then a Start_log_event_v3 is only at master
- startup so we are sure the master has restarted and cleared his temp
- tables; the event always has 'created'>0) or 5.0 (then we have to test
- 'created').
- */
- if (created)
- {
- rli->close_temporary_tables();
-
- /*
- The following is only false if we get here with a BINLOG statement
- */
- if (rli->mi)
- cleanup_load_tmpdir(&rli->mi->cmp_connection_name);
- }
- break;
-
- /*
- Now the older formats; in that case load_tmpdir is cleaned up by the I/O
- thread.
- */
- case 1:
- if (strncmp(rli->relay_log.description_event_for_exec->server_version,
- "3.23.57",7) >= 0 && created)
- {
- /*
- Can distinguish, based on the value of 'created': this event was
- generated at master startup.
- */
- rli->close_temporary_tables();
- }
- /*
- Otherwise, can't distinguish a Start_log_event generated at
- master startup and one generated by master FLUSH LOGS, so cannot
- be sure temp tables have to be dropped. So do nothing.
- */
- break;
- default:
- /*
- This case is not expected. It can be either an event corruption or an
- unsupported binary log version.
- */
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
- ER_THD(thd, ER_SLAVE_FATAL_ERROR),
- "Binlog version not supported");
- DBUG_RETURN(1);
- }
- DBUG_RETURN(error);
-}
#endif /* defined(HAVE_REPLICATION) */
-/***************************************************************************
- Format_description_log_event methods
-****************************************************************************/
-
bool Format_description_log_event::write()
{
bool ret;
bool no_checksum;
/*
- We don't call Start_log_event_v3::write() because this would make 2
+ We don't call Start_log_event_v::write() because this would make 2
my_b_safe_write().
*/
uchar buff[START_V3_HEADER_LEN+1];
@@ -2720,9 +2551,8 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi)
}
/*
- If this event comes from ourselves, there is no cleaning task to
- perform, we don't call Start_log_event_v3::do_apply_event()
- (this was just to update the log's description event).
+ If this event comes from ourselves, there is no cleaning task to perform,
+ we don't do cleanup (this was just to update the log's description event).
*/
if (server_id != (uint32) global_system_variables.server_id)
{
@@ -2735,7 +2565,24 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi)
0, then 96, then jump to first really asked event (which is
>96). So this is ok.
*/
- ret= Start_log_event_v3::do_apply_event(rgi);
+ switch (binlog_version)
+ {
+ case 4:
+ if (created)
+ {
+ rli->close_temporary_tables();
+
+ /* The following is only false if we get here with a BINLOG statement */
+ if (rli->mi)
+ cleanup_load_tmpdir(&rli->mi->cmp_connection_name);
+ }
+ break;
+ default:
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ ER_THD(thd, ER_SLAVE_FATAL_ERROR),
+ "Binlog version not supported");
+ ret= 1;
+ }
}
if (!ret)
@@ -2804,566 +2651,6 @@ int Start_encryption_log_event::do_update_pos(rpl_group_info *rgi)
/**************************************************************************
- Load_log_event methods
-**************************************************************************/
-
-#if defined(HAVE_REPLICATION)
-bool Load_log_event::print_query(THD *thd, bool need_db, const char *cs,
- String *buf, my_off_t *fn_start,
- my_off_t *fn_end, const char *qualify_db)
-{
- if (need_db && db && db_len)
- {
- buf->append(STRING_WITH_LEN("use "));
- append_identifier(thd, buf, db, db_len);
- buf->append(STRING_WITH_LEN("; "));
- }
-
- buf->append(STRING_WITH_LEN("LOAD DATA "));
-
- if (is_concurrent)
- buf->append(STRING_WITH_LEN("CONCURRENT "));
-
- if (fn_start)
- *fn_start= buf->length();
-
- if (check_fname_outside_temp_buf())
- buf->append(STRING_WITH_LEN("LOCAL "));
- buf->append(STRING_WITH_LEN("INFILE '"));
- buf->append_for_single_quote(fname, fname_len);
- buf->append(STRING_WITH_LEN("' "));
-
- if (sql_ex.opt_flags & REPLACE_FLAG)
- buf->append(STRING_WITH_LEN("REPLACE "));
- else if (sql_ex.opt_flags & IGNORE_FLAG)
- buf->append(STRING_WITH_LEN("IGNORE "));
-
- buf->append(STRING_WITH_LEN("INTO"));
-
- if (fn_end)
- *fn_end= buf->length();
-
- buf->append(STRING_WITH_LEN(" TABLE "));
- if (qualify_db)
- {
- append_identifier(thd, buf, qualify_db, strlen(qualify_db));
- buf->append(STRING_WITH_LEN("."));
- }
- append_identifier(thd, buf, table_name, table_name_len);
-
- if (cs != NULL)
- {
- buf->append(STRING_WITH_LEN(" CHARACTER SET "));
- buf->append(cs, strlen(cs));
- }
-
- /* We have to create all optional fields as the default is not empty */
- buf->append(STRING_WITH_LEN(" FIELDS TERMINATED BY "));
- pretty_print_str(buf, sql_ex.field_term, sql_ex.field_term_len);
- if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- buf->append(STRING_WITH_LEN(" OPTIONALLY "));
- buf->append(STRING_WITH_LEN(" ENCLOSED BY "));
- pretty_print_str(buf, sql_ex.enclosed, sql_ex.enclosed_len);
-
- buf->append(STRING_WITH_LEN(" ESCAPED BY "));
- pretty_print_str(buf, sql_ex.escaped, sql_ex.escaped_len);
-
- buf->append(STRING_WITH_LEN(" LINES TERMINATED BY "));
- pretty_print_str(buf, sql_ex.line_term, sql_ex.line_term_len);
- if (sql_ex.line_start_len)
- {
- buf->append(STRING_WITH_LEN(" STARTING BY "));
- pretty_print_str(buf, sql_ex.line_start, sql_ex.line_start_len);
- }
-
- if ((long) skip_lines > 0)
- {
- buf->append(STRING_WITH_LEN(" IGNORE "));
- buf->append_ulonglong(skip_lines);
- buf->append(STRING_WITH_LEN(" LINES "));
- }
-
- if (num_fields)
- {
- uint i;
- const char *field= fields;
- buf->append(STRING_WITH_LEN(" ("));
- for (i = 0; i < num_fields; i++)
- {
- if (i)
- {
- /*
- Yes, the space and comma are reversed here. But this is mostly dead
- code, at most used when reading really old binlogs from old servers,
- so better just leave it as is...
- */
- buf->append(STRING_WITH_LEN(" ,"));
- }
- append_identifier(thd, buf, field, field_lens[i]);
- field+= field_lens[i] + 1;
- }
- buf->append(STRING_WITH_LEN(")"));
- }
- return 0;
-}
-
-
-void Load_log_event::pack_info(Protocol *protocol)
-{
- char query_buffer[1024];
- String query_str(query_buffer, sizeof(query_buffer), system_charset_info);
-
- query_str.length(0);
- print_query(protocol->thd, TRUE, NULL, &query_str, 0, 0, NULL);
- protocol->store(query_str.ptr(), query_str.length(), &my_charset_bin);
-}
-#endif /* defined(HAVE_REPLICATION) */
-
-
-bool Load_log_event::write_data_header()
-{
- char buf[LOAD_HEADER_LEN];
- int4store(buf + L_THREAD_ID_OFFSET, slave_proxy_id);
- int4store(buf + L_EXEC_TIME_OFFSET, exec_time);
- int4store(buf + L_SKIP_LINES_OFFSET, skip_lines);
- buf[L_TBL_LEN_OFFSET] = (char)table_name_len;
- buf[L_DB_LEN_OFFSET] = (char)db_len;
- int4store(buf + L_NUM_FIELDS_OFFSET, num_fields);
- return write_data(buf, LOAD_HEADER_LEN) != 0;
-}
-
-
-bool Load_log_event::write_data_body()
-{
- if (sql_ex.write_data(writer))
- return 1;
- if (num_fields && fields && field_lens)
- {
- if (write_data(field_lens, num_fields) ||
- write_data(fields, field_block_len))
- return 1;
- }
- return (write_data(table_name, table_name_len + 1) ||
- write_data(db, db_len + 1) ||
- write_data(fname, fname_len));
-}
-
-
-Load_log_event::Load_log_event(THD *thd_arg, const sql_exchange *ex,
- const char *db_arg, const char *table_name_arg,
- List<Item> &fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup,
- bool ignore, bool using_trans)
- :Log_event(thd_arg,
- (thd_arg->used & THD::THREAD_SPECIFIC_USED)
- ? LOG_EVENT_THREAD_SPECIFIC_F : 0,
- using_trans),
- thread_id(thd_arg->thread_id),
- slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id),
- num_fields(0),fields(0),
- field_lens(0),field_block_len(0),
- table_name(table_name_arg ? table_name_arg : ""),
- db(db_arg), fname(ex->file_name), local_fname(FALSE),
- is_concurrent(is_concurrent_arg)
-{
- time_t end_time;
- time(&end_time);
- exec_time = (ulong) (end_time - thd_arg->start_time);
- /* db can never be a zero pointer in 4.0 */
- db_len = (uint32) strlen(db);
- table_name_len = (uint32) strlen(table_name);
- fname_len = (fname) ? (uint) strlen(fname) : 0;
- sql_ex.field_term = ex->field_term->ptr();
- sql_ex.field_term_len = (uint8) ex->field_term->length();
- sql_ex.enclosed = ex->enclosed->ptr();
- sql_ex.enclosed_len = (uint8) ex->enclosed->length();
- sql_ex.line_term = ex->line_term->ptr();
- sql_ex.line_term_len = (uint8) ex->line_term->length();
- sql_ex.line_start = ex->line_start->ptr();
- sql_ex.line_start_len = (uint8) ex->line_start->length();
- sql_ex.escaped = ex->escaped->ptr();
- sql_ex.escaped_len = (uint8) ex->escaped->length();
- sql_ex.opt_flags = 0;
- sql_ex.cached_new_format = -1;
-
- if (ex->dumpfile)
- sql_ex.opt_flags|= DUMPFILE_FLAG;
- if (ex->opt_enclosed)
- sql_ex.opt_flags|= OPT_ENCLOSED_FLAG;
-
- sql_ex.empty_flags= 0;
-
- switch (handle_dup) {
- case DUP_REPLACE:
- sql_ex.opt_flags|= REPLACE_FLAG;
- break;
- case DUP_UPDATE: // Impossible here
- case DUP_ERROR:
- break;
- }
- if (ignore)
- sql_ex.opt_flags|= IGNORE_FLAG;
-
- if (!ex->field_term->length())
- sql_ex.empty_flags |= FIELD_TERM_EMPTY;
- if (!ex->enclosed->length())
- sql_ex.empty_flags |= ENCLOSED_EMPTY;
- if (!ex->line_term->length())
- sql_ex.empty_flags |= LINE_TERM_EMPTY;
- if (!ex->line_start->length())
- sql_ex.empty_flags |= LINE_START_EMPTY;
- if (!ex->escaped->length())
- sql_ex.empty_flags |= ESCAPED_EMPTY;
-
- skip_lines = ex->skip_lines;
-
- List_iterator<Item> li(fields_arg);
- field_lens_buf.length(0);
- fields_buf.length(0);
- Item* item;
- while ((item = li++))
- {
- num_fields++;
- uchar len= (uchar) item->name.length;
- field_block_len += len + 1;
- fields_buf.append(item->name.str, len + 1);
- field_lens_buf.append((char*)&len, 1);
- }
-
- field_lens = (const uchar*)field_lens_buf.ptr();
- fields = fields_buf.ptr();
-}
-
-
-/**
- Load_log_event::set_fields()
-
- @note
- This function can not use the member variable
- for the database, since LOAD DATA INFILE on the slave
- can be for a different database than the current one.
- This is the reason for the affected_db argument to this method.
-*/
-
-void Load_log_event::set_fields(const char* affected_db,
- List<Item> &field_list,
- Name_resolution_context *context)
-{
- uint i;
- const char* field = fields;
- for (i= 0; i < num_fields; i++)
- {
- LEX_CSTRING field_name= {field, field_lens[i] };
- field_list.push_back(new (thd->mem_root)
- Item_field(thd, context,
- Lex_cstring_strlen(affected_db),
- Lex_cstring_strlen(table_name),
- field_name),
- thd->mem_root);
- field+= field_lens[i] + 1;
- }
-}
-
-
-#if defined(HAVE_REPLICATION)
-/**
- Does the data loading job when executing a LOAD DATA on the slave.
-
- @param net
- @param rli
- @param use_rli_only_for_errors If set to 1, rli is provided to
- Load_log_event::exec_event only for this
- function to have RPL_LOG_NAME and
- rli->last_slave_error, both being used by
- error reports. rli's position advancing
- is skipped (done by the caller which is
- Execute_load_log_event::exec_event).
- If set to 0, rli is provided for full use,
- i.e. for error reports and position
- advancing.
-
- @todo
- fix this; this can be done by testing rules in
- Create_file_log_event::exec_event() and then discarding Append_block
- et al.
- @todo
- this is a bug - this needs to be moved to the I/O thread
-
- @retval
- 0 Success
- @retval
- 1 Failure
-*/
-
-int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
- bool use_rli_only_for_errors)
-{
- Relay_log_info const *rli= rgi->rli;
- Rpl_filter *rpl_filter= rli->mi->rpl_filter;
- DBUG_ENTER("Load_log_event::do_apply_event");
-
- DBUG_ASSERT(thd->query() == 0);
- set_thd_db(thd, rpl_filter, db, db_len);
- thd->clear_error(1);
-
- /* see Query_log_event::do_apply_event() and BUG#13360 */
- DBUG_ASSERT(!rgi->m_table_map.count());
- /*
- Usually lex_start() is called by mysql_parse(), but we need it here
- as the present method does not call mysql_parse().
- */
- lex_start(thd);
- thd->lex->local_file= local_fname;
- thd->reset_for_next_command(0); // Errors are cleared above
-
- /*
- We test replicate_*_db rules. Note that we have already prepared
- the file to load, even if we are going to ignore and delete it
- now. So it is possible that we did a lot of disk writes for
- nothing. In other words, a big LOAD DATA INFILE on the master will
- still consume a lot of space on the slave (space in the relay log
- + space of temp files: twice the space of the file to load...)
- even if it will finally be ignored. TODO: fix this; this can be
- done by testing rules in Create_file_log_event::do_apply_event()
- and then discarding Append_block et al. Another way is to do the
- filtering in the I/O thread (more efficient: no disk writes at
- all).
-
-
- Note: We do not need to execute reset_one_shot_variables() if this
- db_ok() test fails.
- Reason: The db stored in binlog events is the same for SET and for
- its companion query. If the SET is ignored because of
- db_ok(), the companion query will also be ignored, and if
- the companion query is ignored in the db_ok() test of
- ::do_apply_event(), then the companion SET is also ignored, so
- we don't need to call reset_one_shot_variables().
- */
- if (rpl_filter->db_ok(thd->db.str))
- {
- thd->set_time(when, when_sec_part);
- thd->set_query_id(next_query_id());
- thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
-
- TABLE_LIST tables;
- LEX_CSTRING db_name= { thd->strmake(thd->db.str, thd->db.length), thd->db.length };
- if (lower_case_table_names)
- my_casedn_str(system_charset_info, (char *)table_name);
- LEX_CSTRING tbl_name= { table_name, strlen(table_name) };
- tables.init_one_table(&db_name, &tbl_name, 0, TL_WRITE);
- tables.updating= 1;
-
- // the table will be opened in mysql_load
- if (rpl_filter->is_on() && !rpl_filter->tables_ok(thd->db.str, &tables))
- {
- // TODO: this is a bug - this needs to be moved to the I/O thread
- if (net)
- skip_load_data_infile(net);
- }
- else
- {
- enum enum_duplicates handle_dup;
- bool ignore= 0;
- char query_buffer[1024];
- String query_str(query_buffer, sizeof(query_buffer), system_charset_info);
- char *load_data_query;
-
- query_str.length(0);
- /*
- Forge LOAD DATA INFILE query which will be used in SHOW PROCESS LIST
- and written to slave's binlog if binlogging is on.
- */
- print_query(thd, FALSE, NULL, &query_str, NULL, NULL, NULL);
- if (!(load_data_query= (char *)thd->strmake(query_str.ptr(),
- query_str.length())))
- {
- /*
- This will set thd->fatal_error in case of OOM. So we surely will notice
- that something is wrong.
- */
- goto error;
- }
-
- thd->set_query(load_data_query, (uint) (query_str.length()));
-
- if (sql_ex.opt_flags & REPLACE_FLAG)
- handle_dup= DUP_REPLACE;
- else if (sql_ex.opt_flags & IGNORE_FLAG)
- {
- ignore= 1;
- handle_dup= DUP_ERROR;
- }
- else
- {
- /*
- When replication is running fine, if it was DUP_ERROR on the
- master then we could choose IGNORE here, because if DUP_ERROR
- succeeded on the master, and data is identical on the master and slave,
- then there should be no uniqueness errors on the slave, so IGNORE is
- the same as DUP_ERROR. But in the unlikely case of uniqueness errors
- (because the data on the master and slave happen to be different,
- due to user error or a bug), we want LOAD DATA to print an error message
- on the slave to discover the problem.
-
- If reading from net (a 3.23 master), mysql_load() will change this
- to IGNORE.
- */
- handle_dup= DUP_ERROR;
- }
- /*
- We need to set thd->lex->sql_command and thd->lex->duplicates
- since InnoDB tests these variables to decide if this is a LOAD
- DATA ... REPLACE INTO ... statement even though mysql_parse()
- is not called. This is not needed in 5.0 since there the LOAD
- DATA ... statement is replicated using mysql_parse(), which
- sets the thd->lex fields correctly.
- */
- thd->lex->sql_command= SQLCOM_LOAD;
- thd->lex->duplicates= handle_dup;
-
- sql_exchange ex((char*)fname, sql_ex.opt_flags & DUMPFILE_FLAG);
- String field_term(sql_ex.field_term,sql_ex.field_term_len,log_cs);
- String enclosed(sql_ex.enclosed,sql_ex.enclosed_len,log_cs);
- String line_term(sql_ex.line_term,sql_ex.line_term_len,log_cs);
- String line_start(sql_ex.line_start,sql_ex.line_start_len,log_cs);
- String escaped(sql_ex.escaped,sql_ex.escaped_len, log_cs);
- ex.field_term= &field_term;
- ex.enclosed= &enclosed;
- ex.line_term= &line_term;
- ex.line_start= &line_start;
- ex.escaped= &escaped;
-
- ex.opt_enclosed = (sql_ex.opt_flags & OPT_ENCLOSED_FLAG);
- if (sql_ex.empty_flags & FIELD_TERM_EMPTY)
- ex.field_term->length(0);
-
- ex.skip_lines = skip_lines;
- List<Item> field_list;
- thd->lex->first_select_lex()->context.resolve_in_table_list_only(&tables);
- set_fields(tables.db.str,
- field_list, &thd->lex->first_select_lex()->context);
- thd->variables.pseudo_thread_id= thread_id;
- if (net)
- {
- // mysql_load will use thd->net to read the file
- thd->net.vio = net->vio;
- // Make sure the client does not get confused about the packet sequence
- thd->net.pkt_nr = net->pkt_nr;
- }
- /*
- It is safe to use tmp_list twice because we are not going to
- update it inside mysql_load().
- */
- List<Item> tmp_list;
- if (thd->open_temporary_tables(&tables) ||
- mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list,
- handle_dup, ignore, net != 0))
- thd->is_slave_error= 1;
- if (thd->cuted_fields)
- {
- /* log_pos is the position of the LOAD event in the master log */
- sql_print_warning("Slave: load data infile on table '%s' at "
- "log position %llu in log '%s' produced %ld "
- "warning(s). Default database: '%s'",
- (char*) table_name, log_pos, RPL_LOG_NAME,
- (ulong) thd->cuted_fields,
- thd->get_db());
- }
- if (net)
- net->pkt_nr= thd->net.pkt_nr;
- }
- }
- else
- {
- /*
- We will just ask the master to send us /dev/null if we do not
- want to load the data.
- TODO: this is a bug - needs to be done in the I/O thread
- */
- if (net)
- skip_load_data_infile(net);
- }
-
-error:
- thd->net.vio = 0;
- const char *remember_db= thd->get_db();
- thd->catalog= 0;
- thd->set_db(&null_clex_str); /* will free the current database */
- thd->reset_query();
- thd->get_stmt_da()->set_overwrite_status(true);
- thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_GTID_BEGIN);
- thd->get_stmt_da()->set_overwrite_status(false);
- close_thread_tables(thd);
- /*
- - If transaction rollback was requested due to deadlock
- perform it and release metadata locks.
- - If inside a multi-statement transaction,
- defer the release of metadata locks until the current
- transaction is either committed or rolled back. This prevents
- other statements from modifying the table for the entire
- duration of this transaction. This provides commit ordering
- and guarantees serializability across multiple transactions.
- - If in autocommit mode, or outside a transactional context,
- automatically release metadata locks of the current statement.
- */
- if (thd->transaction_rollback_request)
- {
- trans_rollback_implicit(thd);
- thd->release_transactional_locks();
- }
- else if (! thd->in_multi_stmt_transaction_mode())
- thd->release_transactional_locks();
- else
- thd->mdl_context.release_statement_locks();
-
- DBUG_EXECUTE_IF("LOAD_DATA_INFILE_has_fatal_error",
- thd->is_slave_error= 0; thd->is_fatal_error= 1;);
-
- if (unlikely(thd->is_slave_error))
- {
- /* this err/sql_errno code is copy-paste from net_send_error() */
- const char *err;
- int sql_errno;
- if (thd->is_error())
- {
- err= thd->get_stmt_da()->message();
- sql_errno= thd->get_stmt_da()->sql_errno();
- }
- else
- {
- sql_errno=ER_UNKNOWN_ERROR;
- err= ER_THD(thd, sql_errno);
- }
- rli->report(ERROR_LEVEL, sql_errno, rgi->gtid_info(), "\
-Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- err, (char*)table_name, remember_db);
- free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
- DBUG_RETURN(1);
- }
- free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
-
- if (unlikely(thd->is_fatal_error))
- {
- char buf[256];
- my_snprintf(buf, sizeof(buf),
- "Running LOAD DATA INFILE on table '%-.64s'."
- " Default database: '%-.64s'",
- (char*)table_name,
- remember_db);
-
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
- ER_THD(thd, ER_SLAVE_FATAL_ERROR), buf);
- DBUG_RETURN(1);
- }
-
- DBUG_RETURN( use_rli_only_for_errors ? 0 : Log_event::do_apply_event(rgi) );
-}
-#endif
-
-
-/**************************************************************************
Rotate_log_event methods
**************************************************************************/
@@ -4858,10 +4145,9 @@ User_var_log_event::do_shall_skip(rpl_group_info *rgi)
written all DROP TEMPORARY TABLE (prepared statements' deletion is
TODO only when we binlog prep stmts). We used to clean up
slave_load_tmpdir, but this is useless as it has been cleared at the
- end of LOAD DATA INFILE. So we have nothing to do here. The place
- were we must do this cleaning is in
- Start_log_event_v3::do_apply_event(), not here. Because if we come
- here, the master was sane.
+ end of LOAD DATA INFILE. So we have nothing to do here. The place where we
+ must do this cleaning is in Format_description_log_event::do_apply_event(),
+ not here. Because if we come here, the master was sane.
This must only be called from the Slave SQL thread, since it calls
Relay_log_info::flush().
@@ -4895,178 +4181,6 @@ int Stop_log_event::do_update_pos(rpl_group_info *rgi)
/**************************************************************************
- Create_file_log_event methods
-**************************************************************************/
-
-Create_file_log_event::
-Create_file_log_event(THD* thd_arg, sql_exchange* ex,
- const char* db_arg, const char* table_name_arg,
- List<Item>& fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup,
- bool ignore,
- uchar* block_arg, uint block_len_arg, bool using_trans)
- :Load_log_event(thd_arg, ex, db_arg, table_name_arg, fields_arg,
- is_concurrent_arg,
- handle_dup, ignore, using_trans),
- fake_base(0), block(block_arg), event_buf(0), block_len(block_len_arg),
- file_id(thd_arg->file_id = mysql_bin_log.next_file_id())
-{
- DBUG_ENTER("Create_file_log_event");
- sql_ex.force_new_format();
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Create_file_log_event::write_data_body()
-*/
-
-bool Create_file_log_event::write_data_body()
-{
- bool res;
- if ((res= Load_log_event::write_data_body()) || fake_base)
- return res;
- return write_data("", 1) ||
- write_data(block, block_len);
-}
-
-
-/*
- Create_file_log_event::write_data_header()
-*/
-
-bool Create_file_log_event::write_data_header()
-{
- bool res;
- uchar buf[CREATE_FILE_HEADER_LEN];
- if ((res= Load_log_event::write_data_header()) || fake_base)
- return res;
- int4store(buf + CF_FILE_ID_OFFSET, file_id);
- return write_data(buf, CREATE_FILE_HEADER_LEN) != 0;
-}
-
-
-/*
- Create_file_log_event::write_base()
-*/
-
-bool Create_file_log_event::write_base()
-{
- bool res;
- fake_base= 1; // pretend we are Load event
- res= write();
- fake_base= 0;
- return res;
-}
-
-
-#if defined(HAVE_REPLICATION)
-void Create_file_log_event::pack_info(Protocol *protocol)
-{
- char buf[SAFE_NAME_LEN*2 + 30 + 21*2], *pos;
- pos= strmov(buf, "db=");
- memcpy(pos, db, db_len);
- pos= strmov(pos + db_len, ";table=");
- memcpy(pos, table_name, table_name_len);
- pos= strmov(pos + table_name_len, ";file_id=");
- pos= int10_to_str((long) file_id, pos, 10);
- pos= strmov(pos, ";block_len=");
- pos= int10_to_str((long) block_len, pos, 10);
- protocol->store(buf, (uint) (pos-buf), &my_charset_bin);
-}
-#endif /* defined(HAVE_REPLICATION) */
-
-
-/**
- Create_file_log_event::do_apply_event()
- Constructor for Create_file_log_event to intantiate an event
- from the relay log on the slave.
-
- @retval
- 0 Success
- @retval
- 1 Failure
-*/
-
-#if defined(HAVE_REPLICATION)
-int Create_file_log_event::do_apply_event(rpl_group_info *rgi)
-{
- char fname_buf[FN_REFLEN];
- char *ext;
- int fd = -1;
- IO_CACHE file;
- Log_event_writer lew(&file, 0);
- int error = 1;
- Relay_log_info const *rli= rgi->rli;
-
- THD_STAGE_INFO(thd, stage_making_temp_file_create_before_load_data);
- bzero((char*)&file, sizeof(file));
- ext= slave_load_file_stem(fname_buf, file_id, server_id, ".info",
- &rli->mi->connection_name);
- /* old copy may exist already */
- mysql_file_delete(key_file_log_event_info, fname_buf, MYF(0));
- if ((fd= mysql_file_create(key_file_log_event_info,
- fname_buf, CREATE_MODE,
- O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
- MYF(MY_WME))) < 0 ||
- init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0,
- MYF(MY_WME|MY_NABP)))
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: could not open file '%s'",
- fname_buf);
- goto err;
- }
-
- // a trick to avoid allocating another buffer
- fname= fname_buf;
- fname_len= (uint) (strmov(ext, ".data") - fname);
- writer= &lew;
- if (write_base())
- {
- strmov(ext, ".info"); // to have it right in the error message
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: could not write to file '%s'",
- fname_buf);
- goto err;
- }
- end_io_cache(&file);
- mysql_file_close(fd, MYF(0));
-
- // fname_buf now already has .data, not .info, because we did our trick
- /* old copy may exist already */
- mysql_file_delete(key_file_log_event_data, fname_buf, MYF(0));
- if ((fd= mysql_file_create(key_file_log_event_data,
- fname_buf, CREATE_MODE,
- O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
- MYF(MY_WME))) < 0)
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: could not open file '%s'",
- fname_buf);
- goto err;
- }
- if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP)))
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: write to '%s' failed",
- fname_buf);
- goto err;
- }
- error=0; // Everything is ok
-
-err:
- if (unlikely(error))
- end_io_cache(&file);
- if (likely(fd >= 0))
- mysql_file_close(fd, MYF(0));
- return error != 0;
-}
-#endif /* defined(HAVE_REPLICATION) */
-
-
-/**************************************************************************
Append_block_log_event methods
**************************************************************************/
@@ -5228,130 +4342,6 @@ int Delete_file_log_event::do_apply_event(rpl_group_info *rgi)
/**************************************************************************
- Execute_load_log_event methods
-**************************************************************************/
-
-Execute_load_log_event::Execute_load_log_event(THD *thd_arg,
- const char* db_arg,
- bool using_trans)
- :Log_event(thd_arg, 0, using_trans), file_id(thd_arg->file_id), db(db_arg)
-{
-}
-
-
-bool Execute_load_log_event::write()
-{
- uchar buf[EXEC_LOAD_HEADER_LEN];
- int4store(buf + EL_FILE_ID_OFFSET, file_id);
- return write_header(sizeof(buf)) ||
- write_data(buf, sizeof(buf)) ||
- write_footer();
-}
-
-
-#if defined(HAVE_REPLICATION)
-void Execute_load_log_event::pack_info(Protocol *protocol)
-{
- char buf[64];
- uint length;
- length= (uint) sprintf(buf, ";file_id=%u", (uint) file_id);
- protocol->store(buf, (int32) length, &my_charset_bin);
-}
-
-
-/*
- Execute_load_log_event::do_apply_event()
-*/
-
-int Execute_load_log_event::do_apply_event(rpl_group_info *rgi)
-{
- char fname[FN_REFLEN+10];
- char *ext;
- int fd;
- int error= 1;
- IO_CACHE file;
- Load_log_event *lev= 0;
- Relay_log_info const *rli= rgi->rli;
-
- ext= slave_load_file_stem(fname, file_id, server_id, ".info",
- &rli->mi->cmp_connection_name);
- if ((fd= mysql_file_open(key_file_log_event_info,
- fname, O_RDONLY | O_BINARY | O_NOFOLLOW,
- MYF(MY_WME))) < 0 ||
- init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0,
- MYF(MY_WME|MY_NABP)))
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Exec_load event: could not open file '%s'",
- fname);
- goto err;
- }
- if (!(lev= (Load_log_event*)
- Log_event::read_log_event(&file,
- rli->relay_log.description_event_for_exec,
- opt_slave_sql_verify_checksum)) ||
- lev->get_type_code() != NEW_LOAD_EVENT)
- {
- rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "Error in Exec_load event: "
- "file '%s' appears corrupted", fname);
- goto err;
- }
- lev->thd = thd;
- /*
- lev->do_apply_event should use rli only for errors i.e. should
- not advance rli's position.
-
- lev->do_apply_event is the place where the table is loaded (it
- calls mysql_load()).
- */
-
- if (lev->do_apply_event(0,rgi,1))
- {
- /*
- We want to indicate the name of the file that could not be loaded
- (SQL_LOADxxx).
- But as we are here we are sure the error is in rli->last_slave_error and
- rli->last_slave_errno (example of error: duplicate entry for key), so we
- don't want to overwrite it with the filename.
- What we want instead is add the filename to the current error message.
- */
- char *tmp= my_strdup(PSI_INSTRUMENT_ME, rli->last_error().message, MYF(MY_WME));
- if (tmp)
- {
- rli->report(ERROR_LEVEL, rli->last_error().number, rgi->gtid_info(),
- "%s. Failed executing load from '%s'", tmp, fname);
- my_free(tmp);
- }
- goto err;
- }
- /*
- We have an open file descriptor to the .info file; we need to close it
- or Windows will refuse to delete the file in mysql_file_delete().
- */
- if (fd >= 0)
- {
- mysql_file_close(fd, MYF(0));
- end_io_cache(&file);
- fd= -1;
- }
- mysql_file_delete(key_file_log_event_info, fname, MYF(MY_WME));
- memcpy(ext, ".data", 6);
- mysql_file_delete(key_file_log_event_data, fname, MYF(MY_WME));
- error = 0;
-
-err:
- delete lev;
- if (fd >= 0)
- {
- mysql_file_close(fd, MYF(0));
- end_io_cache(&file);
- }
- return error;
-}
-
-#endif /* defined(HAVE_REPLICATION) */
-
-/**************************************************************************
Begin_load_query_log_event methods
**************************************************************************/
@@ -6386,7 +5376,7 @@ bool Rows_log_event::write_data_header()
});
int6store(buf + RW_MAPID_OFFSET, m_table_id);
int2store(buf + RW_FLAGS_OFFSET, m_flags);
- return write_data(buf, ROWS_HEADER_LEN);
+ return write_data(buf, ROWS_HEADER_LEN_V1);
}
bool Rows_log_event::write_data_body()
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 2701dac56c4..7f930c9d9ae 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -20,6 +20,74 @@
#include "key.h"
#include "sql_statistics.h"
#include "rowid_filter.h"
+#include "optimizer_defaults.h"
+
+static void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
+ Cost_estimate *cost);
+
+
+
+/*
+ The following calculation is the same as in multi_range_read_info()
+
+ @param cost Total cost is stored here
+ @param keyno Key number
+ @param n_ranges Number of different ranges
+ @param multi_row_ranges Number of ranges that are not EQ_REF
+ @param flags Flags. Only HA_MRR_INDEX_ONLY is used.
+ @param total_rows Number of rows expected to be read.
+ @param io_blocks Number of blocks we expect to read for
+ a non-clustered index.
+ @param unassigned_single_point_ranges
+ Number of blocks we have not yet read for
+ a clustered index.
+*/
+
+void handler::calculate_costs(Cost_estimate *cost, uint keyno,
+ uint n_ranges, uint multi_row_ranges,
+ uint flags,
+ ha_rows total_rows,
+ ulonglong io_blocks,
+ ulonglong unassigned_single_point_ranges)
+{
+ cost->reset(this);
+
+ if (!is_clustering_key(keyno))
+ {
+ cost->index_cost= ha_keyread_time(keyno, n_ranges,
+ total_rows + multi_row_ranges,
+ io_blocks);
+
+ if (!(flags & HA_MRR_INDEX_ONLY))
+ {
+ /* ha_rnd_pos_time includes ROW_COPY_COST */
+ cost->row_cost= ha_rnd_pos_time(total_rows);
+ /* Adjust io cost to data size */
+ cost->row_cost.io= MY_MIN(cost->row_cost.io, row_blocks());
+ }
+ else
+ {
+ /* Index only read */
+ cost->copy_cost= rows2double(total_rows) * KEY_COPY_COST;
+ }
+ }
+ else
+ {
+ /* Clustered index */
+ io_blocks= unassigned_single_point_ranges;
+ cost->index_cost= ha_keyread_time(keyno, n_ranges,
+ total_rows + multi_row_ranges,
+ io_blocks);
+ cost->copy_cost= rows2double(total_rows) * ROW_COPY_COST;
+ }
+ /* Adjust io cost to data size */
+ cost->index_cost.io= MY_MIN(cost->index_cost.io, index_blocks(keyno));
+
+ cost->comp_cost= (rows2double(total_rows) * WHERE_COST +
+ MULTI_RANGE_READ_SETUP_COST);
+}
+
+
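
The calculate_costs() helper above replaces the old single cpu/io pair with separate index, row, copy and comparison components. The sketch below is a standalone illustration of how such a split adds up, with made-up constants and a hypothetical mrr_cost_sketch() helper; it is not the real handler or Cost_estimate API.

#include <algorithm>
#include <cstdio>

// Illustrative constants; the server reads the real values from OPTIMIZER_COSTS.
static const double KEY_COPY_COST_X= 0.01;
static const double WHERE_COST_X=    0.02;

struct SimpleCost
{
  double index_cost, row_cost, copy_cost, comp_cost;
  double total() const { return index_cost + row_cost + copy_cost + comp_cost; }
};

// Rough shape of the split: locate each range in the index, then either copy
// key values (covering read) or fetch rows by rowid, and finally charge a
// WHERE check per returned row.
static SimpleCost mrr_cost_sketch(double rows, double ranges, bool index_only,
                                  double key_lookup_cost, double row_lookup_cost,
                                  double data_blocks)
{
  SimpleCost c= {0, 0, 0, 0};
  c.index_cost= ranges * key_lookup_cost;
  if (index_only)
    c.copy_cost= rows * KEY_COPY_COST_X;
  else
    c.row_cost= std::min(rows * row_lookup_cost, data_blocks); // cap by data size
  c.comp_cost= rows * WHERE_COST_X;
  return c;
}

int main()
{
  SimpleCost c= mrr_cost_sketch(1000, 10, false, 0.5, 0.1, 80);
  std::printf("total MRR cost: %.2f\n", c.total());
  return 0;
}
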
/****************************************************************************
* Default MRR implementation (MRR to non-MRR converter)
@@ -37,8 +105,8 @@
@param n_ranges_arg Number of ranges in the sequence, or 0 if the caller
can't efficiently determine it
@param bufsz INOUT IN: Size of the buffer available for use
- OUT: Size of the buffer that is expected to be actually
- used, or 0 if buffer is not needed.
+ OUT: Size of the buffer that is expected to be
+ actually used, or 0 if buffer is not needed.
@param flags INOUT A combination of HA_MRR_* flags
@param cost OUT Estimated cost of MRR access
@@ -56,10 +124,12 @@
contain scan parameters.
*/
+
ha_rows
handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param, uint n_ranges_arg,
uint *bufsz, uint *flags,
+ ha_rows top_limit,
Cost_estimate *cost)
{
KEY_MULTI_RANGE range;
@@ -286,11 +356,15 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
(single_point_ranges - assigned_single_point_ranges).
We don't add these to io_blocks as we don't want to penalize equal
- readss (if we did, a range that would read 5 rows would be
+ reads (if we did, a range that would read 5 rows would be
regarded as better than one equal read).
Better to assume we have done a records_in_range() for the equal
range and it's also cached.
+
+ One effect of this is that io_blocks for simple ranges are often 0,
+ as the blocks were already read by records_in_range and we assume
+ that we don't have to read them again.
*/
io_blocks= (range_blocks_cnt - edge_blocks_cnt);
unassigned_single_point_ranges+= (single_point_ranges -
@@ -299,41 +373,33 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
if (total_rows != HA_POS_ERROR)
{
set_if_smaller(total_rows, max_rows);
-
- /* The following calculation is the same as in multi_range_read_info(): */
*flags |= HA_MRR_USE_DEFAULT_IMPL;
- cost->reset();
- cost->avg_io_cost= cost->idx_avg_io_cost= avg_io_cost();
-
- if (!is_clustering_key(keyno))
- {
- cost->idx_io_count= (double) io_blocks;
- cost->idx_cpu_cost= (keyread_time(keyno, 0, total_rows) +
- n_ranges * IDX_LOOKUP_COST);
- if (!(*flags & HA_MRR_INDEX_ONLY))
- cost->cpu_cost= read_time(keyno, 0, total_rows);
- }
- else
+ calculate_costs(cost, keyno, n_ranges,
+ n_ranges - (uint) single_point_ranges,
+ *flags, total_rows,
+ io_blocks, unassigned_single_point_ranges);
+ if (top_limit < total_rows)
{
/*
- Clustered index
- If all index dives are to a few blocks, then limit the
- ranges used by read_time to the number of dives.
+ Calculate what the cost would be if we only have to read 'top_limit'
+ rows. This is the lowest possible cost when using the range
+ when we find the 'accepted rows' at once.
*/
- io_blocks+= unassigned_single_point_ranges;
- cost->idx_cpu_cost= n_ranges * IDX_LOOKUP_COST;
- uint limited_ranges= (uint) MY_MIN((ulonglong) n_ranges, io_blocks);
- cost->cpu_cost= read_time(keyno, limited_ranges, total_rows);
+ Cost_estimate limit_cost;
+ calculate_costs(&limit_cost, keyno, n_ranges,
+ n_ranges - (uint)single_point_ranges,
+ *flags, top_limit, io_blocks,
+ unassigned_single_point_ranges);
+ cost->limit_cost= limit_cost.total_cost();
}
- cost->cpu_cost+= (rows2double(total_rows) / TIME_FOR_COMPARE +
- MULTI_RANGE_READ_SETUP_COST);
+ DBUG_PRINT("statistics",
+ ("key: %s rows: %llu total_cost: %.3f io_blocks: %llu "
+ "cpu_cost: %.3f",
+ table->s->keynames.type_names[keyno],
+ (ulonglong) total_rows, cost->total_cost(),
+ (ulonglong) (cost->row_cost.io + cost->index_cost.io),
+ (double) (cost->row_cost.cpu + cost->index_cost.cpu)));
}
- DBUG_PRINT("statistics",
- ("key: %s rows: %llu total_cost: %.3f io_blocks: %llu "
- "idx_io_count: %.3f cpu_cost: %.3f io_count: %.3f",
- table->s->keynames.type_names[keyno],
- (ulonglong) total_rows, cost->total_cost(), (ulonglong) io_blocks,
- cost->idx_io_count, cost->cpu_cost, cost->io_count));
DBUG_RETURN(total_rows);
}
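
The hunk above also records a best-case figure for LIMIT queries: when top_limit is below the estimated row count, the same cost calculation is repeated with only top_limit rows and stored in cost->limit_cost. A rough standalone illustration of the idea, with a hypothetical cost_for_rows() standing in for the full calculation:

#include <cstdio>

// Hypothetical per-range cost curve; stands in for calculate_costs().
static double cost_for_rows(double rows) { return 5.0 + 0.05 * rows; }

int main()
{
  double total_rows= 100000, top_limit= 10;
  double full_cost=  cost_for_rows(total_rows);
  // Lowest possible cost if the accepted rows are found right away.
  double limit_cost= top_limit < total_rows ? cost_for_rows(top_limit)
                                            : full_cost;
  std::printf("full: %.2f  with LIMIT: %.2f\n", full_cost, limit_cost);
  return 0;
}
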
@@ -357,7 +423,7 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
@param keyno Index number
@param n_ranges Estimated number of ranges (i.e. intervals) in the
range sequence.
- @param n_rows Estimated total number of records contained within all
+ @param total_rows Estimated total number of records contained within all
of the ranges
@param bufsz INOUT IN: Size of the buffer available for use
OUT: Size of the buffer that will be actually used, or
@@ -372,7 +438,8 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
other Error or can't perform the requested scan
*/
-ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows,
+ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges,
+ uint total_rows,
uint key_parts, uint *bufsz,
uint *flags, Cost_estimate *cost)
{
@@ -385,27 +452,32 @@ ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows,
*bufsz= 0; /* Default implementation doesn't need a buffer */
*flags |= HA_MRR_USE_DEFAULT_IMPL;
- cost->reset();
+ cost->reset(this);
+
/* Produce the same cost as non-MRR code does */
if (!is_clustering_key(keyno))
{
- /*
- idx_io_count could potentially be increased with the number of
- index leaf blocks we have to read for finding n_rows.
- */
- cost->idx_io_count= n_ranges;
- cost->idx_cpu_cost= (keyread_time(keyno, 0, n_rows) +
- n_ranges * IDX_LOOKUP_COST);
+ cost->index_cost= ha_keyread_time(keyno, n_ranges, total_rows, 0);
+
if (!(*flags & HA_MRR_INDEX_ONLY))
{
- cost->cpu_cost= read_time(keyno, 0, n_rows);
+ /* ha_rnd_pos_time includes ROW_COPY_COST */
+ cost->row_cost= ha_rnd_pos_time(total_rows);
+ }
+ else
+ {
+ /* Index only read */
+ cost->copy_cost= rows2double(total_rows) * KEY_COPY_COST;
}
}
else
{
- cost->cpu_cost= read_time(keyno, n_ranges, (uint)n_rows);
+ /* Clustering key */
+ cost->index_cost= ha_keyread_clustered_time(keyno, n_ranges, total_rows,
+ 0);
+ cost->copy_cost= rows2double(total_rows) * ROW_COPY_COST;
}
- cost->cpu_cost+= rows2double(n_rows) / TIME_FOR_COMPARE;
+ cost->comp_cost= rows2double(total_rows) * WHERE_COST;
return 0;
}
@@ -1700,8 +1772,9 @@ ha_rows DsMrr_impl::dsmrr_info(uint keyno, uint n_ranges, uint rows,
*/
ha_rows DsMrr_impl::dsmrr_info_const(uint keyno, RANGE_SEQ_IF *seq,
- void *seq_init_param, uint n_ranges,
- uint *bufsz, uint *flags, Cost_estimate *cost)
+ void *seq_init_param, uint n_ranges,
+ uint *bufsz, uint *flags, ha_rows limit,
+ Cost_estimate *cost)
{
ha_rows rows;
uint def_flags= *flags;
@@ -1711,7 +1784,9 @@ ha_rows DsMrr_impl::dsmrr_info_const(uint keyno, RANGE_SEQ_IF *seq,
seq_init_param,
n_ranges,
&def_bufsz,
- &def_flags, cost);
+ &def_flags,
+ limit,
+ cost);
if (rows == HA_POS_ERROR)
{
/* Default implementation can't perform MRR scan => we can't either */
@@ -1918,7 +1993,8 @@ int DsMrr_impl::dsmrr_explain_info(uint mrr_mode, char *str, size_t size)
}
-static void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows, Cost_estimate *cost);
+static void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows,
+ Cost_estimate *cost);
/**
@@ -1949,7 +2025,6 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
ha_rows rows_in_full_step;
ha_rows rows_in_last_step;
uint n_full_steps;
- double index_read_cost;
elem_size= primary_file->ref_length +
sizeof(void*) * (!MY_TEST(flags & HA_MRR_NO_ASSOCIATION));
@@ -1982,6 +2057,8 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
rows_in_full_step= max_buff_entries;
rows_in_last_step= rows % max_buff_entries;
+ cost->reset(primary_file);
+
/* Adjust buffer size if we expect to use only part of the buffer */
if (n_full_steps)
{
@@ -1990,24 +2067,19 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
}
else
{
- cost->reset();
- *buffer_size= (uint)MY_MAX(*buffer_size,
- (size_t)(1.2*rows_in_last_step) * elem_size +
- primary_file->ref_length + table->key_info[keynr].key_length);
+ *buffer_size= ((uint) MY_MAX(*buffer_size,
+ (size_t)(1.2*rows_in_last_step) * elem_size +
+ primary_file->ref_length +
+ table->key_info[keynr].key_length));
}
Cost_estimate last_step_cost;
+ last_step_cost.avg_io_cost= cost->avg_io_cost;
get_sort_and_sweep_cost(table, rows_in_last_step, &last_step_cost);
cost->add(&last_step_cost);
- if (n_full_steps != 0)
- cost->mem_cost= *buffer_size;
- else
- cost->mem_cost= (double)rows_in_last_step * elem_size;
-
/* Total cost of all index accesses */
- index_read_cost= primary_file->keyread_time(keynr, 1, rows);
- cost->add_io(index_read_cost, 1 /* Random seeks */);
+ cost->index_cost= primary_file->ha_keyread_and_copy_time(keynr, 1, rows, 0);
return FALSE;
}
@@ -2031,55 +2103,17 @@ void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows, Cost_estimate *cost)
{
get_sweep_read_cost(table, nrows, FALSE, cost);
/* Add cost of qsort call: n * log2(n) * cost(rowid_comparison) */
- double cmp_op= rows2double(nrows) * (1.0 / TIME_FOR_COMPARE_ROWID);
+ double cmp_op= rows2double(nrows) * ROWID_COMPARE_COST_THD(table->in_use);
if (cmp_op < 3)
cmp_op= 3;
cost->cpu_cost += cmp_op * log2(cmp_op);
}
- else
- cost->reset();
}
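
get_sort_and_sweep_cost() keeps the n * log2(n) estimate for sorting rowids, now priced with the per-session rowid compare cost. A self-contained version of the same arithmetic, using an illustrative constant rather than the server's setting:

#include <cmath>
#include <cstdio>

// Illustrative value; the server takes this from the optimizer cost settings.
static const double ROWID_COMPARE_COST_X= 0.002;

static double rowid_sort_cost(double nrows)
{
  double cmp_op= nrows * ROWID_COMPARE_COST_X;
  if (cmp_op < 3)
    cmp_op= 3;                        // floor, as in the code above
  return cmp_op * std::log2(cmp_op);  // qsort: n * log2(n) comparisons
}

int main()
{
  std::printf("sort cost for 1M rowids: %.1f\n", rowid_sort_cost(1e6));
  return 0;
}
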
/**
Get cost of reading nrows table records in a "disk sweep"
- A disk sweep read is a sequence of handler->rnd_pos(rowid) calls that made
- for an ordered sequence of rowids.
-
- We assume hard disk IO. The read is performed as follows:
-
- 1. The disk head is moved to the needed cylinder
- 2. The controller waits for the plate to rotate
- 3. The data is transferred
-
- Time to do #3 is insignificant compared to #2+#1.
-
- Time to move the disk head is proportional to head travel distance.
-
- Time to wait for the plate to rotate depends on whether the disk head
- was moved or not.
-
- If disk head wasn't moved, the wait time is proportional to distance
- between the previous block and the block we're reading.
-
- If the head was moved, we don't know how much we'll need to wait for the
- plate to rotate. We assume the wait time to be a variate with a mean of
- 0.5 of full rotation time.
-
- Our cost units are "random disk seeks". The cost of random disk seek is
- actually not a constant, it depends one range of cylinders we're going
- to access. We make it constant by introducing a fuzzy concept of "typical
- datafile length" (it's fuzzy as it's hard to tell whether it should
- include index file, temp.tables etc). Then random seek cost is:
-
- 1 = half_rotation_cost + move_cost * 1/3 * typical_data_file_length
-
- We define half_rotation_cost as DISK_SEEK_BASE_COST=0.9.
-
- If handler::avg_io_cost() < 1.0, then we will trust the handler
- when it comes to the average cost (this is for example true for HEAP).
-
@param table Table to be accessed
@param nrows Number of rows to retrieve
@param interrupted TRUE <=> Assume that the disk sweep will be
@@ -2087,16 +2121,18 @@ void get_sort_and_sweep_cost(TABLE *table, ha_rows nrows, Cost_estimate *cost)
@param cost OUT The cost.
*/
-void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
- Cost_estimate *cost)
+static void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
+ Cost_estimate *cost)
{
DBUG_ENTER("get_sweep_read_cost");
- cost->reset();
+#ifndef OLD_SWEEP_COST
+ cost->row_cost= table->file->ha_rnd_pos_call_time(nrows);
+#else
if (table->file->pk_is_clustering_key(table->s->primary_key))
{
- cost->cpu_cost= table->file->read_time(table->s->primary_key,
- (uint) nrows, nrows);
+ cost->cpu_cost= table->file->ha_read_and_copy_time(table->s->primary_key,
+ (uint) nrows, nrows);
}
else if ((cost->avg_io_cost= table->file->avg_io_cost()) >= 0.999)
{
@@ -2118,7 +2154,9 @@ void get_sweep_read_cost(TABLE *table, ha_rows nrows, bool interrupted,
DISK_SEEK_PROP_COST*n_blocks/busy_blocks);
}
}
- DBUG_PRINT("info",("returning cost=%g", cost->total_cost()));
+ cost->cpu_cost+= rows2double(nrows) * ROW_COPY_COST;
+#endif
+ DBUG_PRINT("info",("returning cost: %g", cost->total_cost()));
DBUG_VOID_RETURN;
}
diff --git a/sql/multi_range_read.h b/sql/multi_range_read.h
index 57cfd21727f..930ee3f238b 100644
--- a/sql/multi_range_read.h
+++ b/sql/multi_range_read.h
@@ -576,7 +576,7 @@ public:
ha_rows dsmrr_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param, uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost);
+ uint *flags, ha_rows limit, Cost_estimate *cost);
int dsmrr_explain_info(uint mrr_mode, char *str, size_t size);
private:
diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h
index 87d1a7facf1..a581899aab2 100644
--- a/sql/my_json_writer.h
+++ b/sql/my_json_writer.h
@@ -407,7 +407,7 @@ public:
virtual ~Json_writer_struct() = default;
#endif
- bool trace_started() const
+ inline bool trace_started() const
{
return my_writer != 0;
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 23f34dd0d84..ee368def9be 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2022, MariaDB
+ Copyright (c) 2008, 2023, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -52,6 +52,7 @@
#include "sql_expression_cache.h" // subquery_cache_miss, subquery_cache_hit
#include "sys_vars_shared.h"
#include "ddl_log.h"
+#include "optimizer_defaults.h"
#include <m_ctype.h>
#include <my_dir.h>
@@ -594,7 +595,6 @@ extern "C" {
char server_version[SERVER_VERSION_LENGTH];
}
char *server_version_ptr;
-bool using_custom_server_version= false;
char *mysqld_unix_port, *opt_mysql_tmpdir;
ulong thread_handling;
@@ -734,7 +734,7 @@ mysql_mutex_t LOCK_prepared_stmt_count;
#ifdef HAVE_OPENSSL
mysql_mutex_t LOCK_des_key_file;
#endif
-mysql_mutex_t LOCK_backup_log;
+mysql_mutex_t LOCK_backup_log, LOCK_optimizer_costs;
mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
mysql_rwlock_t LOCK_ssl_refresh;
mysql_rwlock_t LOCK_all_status_vars;
@@ -757,8 +757,6 @@ char *opt_relay_logname = 0, *opt_relaylog_index_name=0;
char *opt_logname, *opt_slow_logname, *opt_bin_logname;
char *opt_binlog_index_name=0;
-
-
/* Static variables */
my_bool opt_stack_trace;
@@ -906,7 +904,7 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_LOCK_crypt, key_LOCK_delayed_create,
key_LOCK_delayed_insert, key_LOCK_delayed_status, key_LOCK_error_log,
key_LOCK_gdl, key_LOCK_global_system_variables,
- key_LOCK_manager, key_LOCK_backup_log,
+ key_LOCK_manager, key_LOCK_backup_log, key_LOCK_optimizer_costs,
key_LOCK_prepared_stmt_count,
key_LOCK_rpl_status, key_LOCK_server_started,
key_LOCK_status, key_LOCK_temp_pool,
@@ -969,6 +967,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_hash_filo_lock, "hash_filo::lock", 0},
{ &key_LOCK_active_mi, "LOCK_active_mi", PSI_FLAG_GLOBAL},
{ &key_LOCK_backup_log, "LOCK_backup_log", PSI_FLAG_GLOBAL},
+ { &key_LOCK_optimizer_costs, "LOCK_optimizer_costs", PSI_FLAG_GLOBAL},
{ &key_LOCK_temp_pool, "LOCK_temp_pool", PSI_FLAG_GLOBAL},
{ &key_LOCK_thread_id, "LOCK_thread_id", PSI_FLAG_GLOBAL},
{ &key_LOCK_crypt, "LOCK_crypt", PSI_FLAG_GLOBAL},
@@ -2006,6 +2005,7 @@ static void clean_up(bool print_message)
mdl_destroy();
dflt_key_cache= 0;
key_caches.delete_elements(free_key_cache);
+ free_all_optimizer_costs();
wt_end();
multi_keycache_free();
sp_cache_end();
@@ -2128,6 +2128,7 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_active_mi);
mysql_rwlock_destroy(&LOCK_ssl_refresh);
mysql_mutex_destroy(&LOCK_backup_log);
+ mysql_mutex_destroy(&LOCK_optimizer_costs);
mysql_mutex_destroy(&LOCK_temp_pool);
mysql_rwlock_destroy(&LOCK_sys_init_connect);
mysql_rwlock_destroy(&LOCK_sys_init_slave);
@@ -4439,7 +4440,6 @@ static int init_common_variables()
return 1;
}
-
#ifdef WITH_WSREP
/*
We need to initialize auxiliary variables, that will be
@@ -4501,6 +4501,8 @@ static int init_thread_environment()
mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered,
MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_backup_log, &LOCK_backup_log, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_optimizer_costs, &LOCK_optimizer_costs,
+ MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_temp_pool, &LOCK_temp_pool, MY_MUTEX_INIT_FAST);
#ifdef HAVE_OPENSSL
@@ -4856,6 +4858,25 @@ init_gtid_pos_auto_engines(void)
return 0;
}
+
+#define us_to_ms(X) if (X > 0) X/= 1000;
+static int adjust_optimizer_costs(void *, OPTIMIZER_COSTS *oc, void *)
+{
+ us_to_ms(oc->disk_read_cost);
+ us_to_ms(oc->index_block_copy_cost);
+ us_to_ms(oc->key_cmp_cost);
+ us_to_ms(oc->key_copy_cost);
+ us_to_ms(oc->key_lookup_cost);
+ us_to_ms(oc->key_next_find_cost);
+ us_to_ms(oc->row_copy_cost);
+ us_to_ms(oc->row_lookup_cost);
+ us_to_ms(oc->row_next_find_cost);
+ us_to_ms(oc->rowid_cmp_cost);
+ us_to_ms(oc->rowid_copy_cost);
+ return 0;
+}
+
+
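
adjust_optimizer_costs() rescales cost options that were given in microseconds into the milliseconds used internally, leaving zero and negative values untouched. A tiny standalone equivalent of the conversion; an illustrative function, not the server macro:

#include <cstdio>

// Same idea as us_to_ms(): only positive values are divided by 1000,
// zero and negative values pass through unchanged.
static void us_to_ms_example(double &cost)
{
  if (cost > 0)
    cost/= 1000;
}

int main()
{
  double disk_read_cost= 10240;   // value supplied in microseconds
  double untouched= 0;            // left as-is by the guard
  us_to_ms_example(disk_read_cost);
  us_to_ms_example(untouched);
  std::printf("disk_read_cost=%.3f ms, untouched=%.0f\n",
              disk_read_cost, untouched);
  return 0;
}
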
#define MYSQL_COMPATIBILITY_OPTION(option) \
{ option, OPT_MYSQL_COMPATIBILITY, \
0, 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }
@@ -4997,12 +5018,12 @@ static int init_server_components()
/* need to configure logging before initializing storage engines */
if (!opt_bin_log_used && !WSREP_ON)
{
- if (opt_log_slave_updates)
- sql_print_warning("You need to use --log-bin to make "
- "--log-slave-updates work.");
- if (binlog_format_used)
- sql_print_warning("You need to use --log-bin to make "
- "--binlog-format work.");
+ if (opt_log_slave_updates && (global_system_variables.log_warnings >= 4))
+ sql_print_information("You need to use --log-bin to make "
+ "--log-slave-updates work.");
+ if (binlog_format_used && (global_system_variables.log_warnings >= 4))
+ sql_print_information("You need to use --log-bin to make "
+ "--binlog-format work.");
}
/* Check that we have not let the format to unspecified at this point */
@@ -5199,8 +5220,15 @@ static int init_server_components()
tc_log= 0; // ha_initialize_handlerton() needs that
- if (!opt_abort && ddl_log_initialize())
- unireg_abort(1);
+ if (!opt_abort)
+ {
+ if (ddl_log_initialize())
+ unireg_abort(1);
+
+ process_optimizer_costs((process_optimizer_costs_t)adjust_optimizer_costs, 0);
+ us_to_ms(global_system_variables.optimizer_where_cost);
+ us_to_ms(global_system_variables.optimizer_scan_setup_cost);
+ }
if (plugin_init(&remaining_argc, remaining_argv,
(opt_noacl ? PLUGIN_INIT_SKIP_PLUGIN_TABLE : 0) |
@@ -5313,6 +5341,10 @@ static int init_server_components()
MARIADB_REMOVED_OPTION("innodb-thread-concurrency"),
MARIADB_REMOVED_OPTION("innodb-thread-sleep-delay"),
MARIADB_REMOVED_OPTION("innodb-undo-logs"),
+
+ /* The following options were deprecated in 10.9 */
+ MARIADB_REMOVED_OPTION("innodb-change-buffering"),
+
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
/*
@@ -5426,6 +5458,7 @@ static int init_server_components()
unireg_abort(1);
}
#endif
+ copy_tmptable_optimizer_costs();
#ifdef WITH_WSREP
/*
@@ -5484,9 +5517,9 @@ static int init_server_components()
}
else
{
- if (binlog_expire_logs_seconds)
- sql_print_warning("You need to use --log-bin to make --expire-logs-days "
- "or --binlog-expire-logs-seconds work.");
+ if (binlog_expire_logs_seconds && (global_system_variables.log_warnings >= 4))
+ sql_print_information("You need to use --log-bin to make --expire-logs-days "
+ "or --binlog-expire-logs-seconds work.");
}
#endif
@@ -7400,12 +7433,14 @@ SHOW_VAR status_vars[]= {
{"Feature_dynamic_columns", (char*) offsetof(STATUS_VAR, feature_dynamic_columns), SHOW_LONG_STATUS},
{"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS},
{"Feature_gis", (char*) offsetof(STATUS_VAR, feature_gis), SHOW_LONG_STATUS},
- {"Feature_insert_returning", (char*)offsetof(STATUS_VAR, feature_insert_returning), SHOW_LONG_STATUS},
- {"Feature_invisible_columns", (char*) offsetof(STATUS_VAR, feature_invisible_columns), SHOW_LONG_STATUS},
+ {"Feature_insert_returning", (char*)offsetof(STATUS_VAR, feature_insert_returning), SHOW_LONG_STATUS},
+ {"Feature_into_outfile", (char*) offsetof(STATUS_VAR, feature_into_outfile), SHOW_LONG_STATUS},
+ {"Feature_into_variable", (char*) offsetof(STATUS_VAR, feature_into_variable), SHOW_LONG_STATUS},
+ {"Feature_invisible_columns",(char*) offsetof(STATUS_VAR, feature_invisible_columns), SHOW_LONG_STATUS},
{"Feature_json", (char*) offsetof(STATUS_VAR, feature_json), SHOW_LONG_STATUS},
{"Feature_locale", (char*) offsetof(STATUS_VAR, feature_locale), SHOW_LONG_STATUS},
{"Feature_subquery", (char*) offsetof(STATUS_VAR, feature_subquery), SHOW_LONG_STATUS},
- {"Feature_system_versioning", (char*) offsetof(STATUS_VAR, feature_system_versioning), SHOW_LONG_STATUS},
+ {"Feature_system_versioning",(char*) offsetof(STATUS_VAR, feature_system_versioning), SHOW_LONG_STATUS},
{"Feature_application_time_periods", (char*) offsetof(STATUS_VAR, feature_application_time_periods), SHOW_LONG_STATUS},
{"Feature_timezone", (char*) offsetof(STATUS_VAR, feature_timezone), SHOW_LONG_STATUS},
{"Feature_trigger", (char*) offsetof(STATUS_VAR, feature_trigger), SHOW_LONG_STATUS},
@@ -7820,12 +7855,17 @@ static int mysql_init_variables(void)
strnmov(server_version, MYSQL_SERVER_VERSION, sizeof(server_version)-1);
thread_cache.init();
key_caches.empty();
- if (!(dflt_key_cache= get_or_create_key_cache(default_key_cache_base.str,
- default_key_cache_base.length)))
+ if (!(dflt_key_cache= get_or_create_key_cache(default_base.str,
+ default_base.length)))
{
sql_print_error("Cannot allocate the keycache");
return 1;
}
+ if (create_default_optimizer_costs())
+ {
+ sql_print_error("Cannot allocate optimizer_costs");
+ return 1;
+ }
/* set key_cache_hash.default_value = dflt_key_cache */
multi_keycache_init();
@@ -8043,7 +8083,6 @@ mysqld_get_one_option(const struct my_option *opt, const char *argument,
strmake(server_version, argument, sizeof(server_version) - 1);
set_sys_var_value_origin(&server_version_ptr,
*filename ? sys_var::CONFIG : sys_var::COMMAND_LINE, filename);
- using_custom_server_version= true;
}
#ifndef EMBEDDED_LIBRARY
else
@@ -8401,11 +8440,14 @@ mysqld_get_one_option(const struct my_option *opt, const char *argument,
}
-/** Handle arguments for multiple key caches. */
+/**
+ Handle arguments for multiple key caches, replication_options and
+ optimizer_costs
+ */
C_MODE_START
-static void*
+static void *
mysql_getopt_value(const char *name, uint length,
const struct my_option *option, int *error)
{
@@ -8443,6 +8485,7 @@ mysql_getopt_value(const char *name, uint length,
}
/* We return in all cases above. Let us silence -Wimplicit-fallthrough */
DBUG_ASSERT(0);
+ break;
#ifdef HAVE_REPLICATION
/* fall through */
case OPT_REPLICATE_DO_DB:
@@ -8470,11 +8513,62 @@ mysql_getopt_value(const char *name, uint length,
}
return 0;
}
-#endif
+ break;
+#endif
+ case OPT_COSTS_DISK_READ_COST:
+ case OPT_COSTS_INDEX_BLOCK_COPY_COST:
+ case OPT_COSTS_KEY_CMP_COST:
+ case OPT_COSTS_KEY_COPY_COST:
+ case OPT_COSTS_KEY_LOOKUP_COST:
+ case OPT_COSTS_KEY_NEXT_FIND_COST:
+ case OPT_COSTS_DISK_READ_RATIO:
+ case OPT_COSTS_ROW_COPY_COST:
+ case OPT_COSTS_ROW_LOOKUP_COST:
+ case OPT_COSTS_ROW_NEXT_FIND_COST:
+ case OPT_COSTS_ROWID_CMP_COST:
+ case OPT_COSTS_ROWID_COPY_COST:
+ {
+ OPTIMIZER_COSTS *costs;
+ if (unlikely(!(costs= get_or_create_optimizer_costs(name, length))))
+ {
+ if (error)
+ *error= EXIT_OUT_OF_MEMORY;
+ return 0;
+ }
+ switch (option->id) {
+ case OPT_COSTS_DISK_READ_COST:
+ return &costs->disk_read_cost;
+ case OPT_COSTS_INDEX_BLOCK_COPY_COST:
+ return &costs->index_block_copy_cost;
+ case OPT_COSTS_KEY_CMP_COST:
+ return &costs->key_cmp_cost;
+ case OPT_COSTS_KEY_COPY_COST:
+ return &costs->key_copy_cost;
+ case OPT_COSTS_KEY_LOOKUP_COST:
+ return &costs->key_lookup_cost;
+ case OPT_COSTS_KEY_NEXT_FIND_COST:
+ return &costs->key_next_find_cost;
+ case OPT_COSTS_DISK_READ_RATIO:
+ return &costs->disk_read_ratio;
+ case OPT_COSTS_ROW_COPY_COST:
+ return &costs->row_copy_cost;
+ case OPT_COSTS_ROW_LOOKUP_COST:
+ return &costs->row_lookup_cost;
+ case OPT_COSTS_ROW_NEXT_FIND_COST:
+ return &costs->row_next_find_cost;
+ case OPT_COSTS_ROWID_CMP_COST:
+ return &costs->rowid_cmp_cost;
+ case OPT_COSTS_ROWID_COPY_COST:
+ return &costs->rowid_copy_cost;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
}
return option->value;
}
+
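
The new OPT_COSTS_* cases reuse the pattern already in place for key caches: my_getopt asks for the storage address of a structured variable by name, the named OPTIMIZER_COSTS block is looked up or created, and the switch hands back a pointer to the matching member. Below is a stripped-down sketch of that dispatch pattern, with hypothetical types and option ids rather than the real my_getopt hook:

#include <cstring>
#include <map>
#include <string>

// Hypothetical stand-ins for OPTIMIZER_COSTS and the option ids.
struct CostsExample { double disk_read_cost, key_lookup_cost; };
enum { OPT_DISK_READ_COST= 1, OPT_KEY_LOOKUP_COST };

static std::map<std::string, CostsExample> costs_by_name;

// Return the address the option parser should write the parsed value into,
// creating the named cost block on first use.
static void *costs_option_value(const char *name, size_t length, int option_id)
{
  CostsExample &c= costs_by_name[std::string(name, length)];
  switch (option_id) {
  case OPT_DISK_READ_COST:  return &c.disk_read_cost;
  case OPT_KEY_LOOKUP_COST: return &c.key_lookup_cost;
  default:                  return nullptr;
  }
}

int main()
{
  double *slot= static_cast<double*>(
      costs_option_value("example_engine", std::strlen("example_engine"),
                         OPT_DISK_READ_COST));
  *slot= 0.5;    // what the parser would do after reading the command line
  return 0;
}
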
static void option_error_reporter(enum loglevel level, const char *format, ...)
{
va_list args;
@@ -9078,7 +9172,7 @@ void refresh_status(THD *thd)
reset_pfs_status_stats();
#endif
- /* Add thread's status variabes to global status */
+ /* Add thread's status variables to global status */
add_to_status(&global_status_var, &thd->status_var);
/* Reset thread's status variables */
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 2d9a4842d8c..54cafdcde15 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -330,7 +330,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_LOCK_logger, key_LOCK_manager,
key_LOCK_prepared_stmt_count,
key_LOCK_rpl_status, key_LOCK_server_started,
- key_LOCK_status,
+ key_LOCK_status, key_LOCK_optimizer_costs,
key_LOCK_thd_data, key_LOCK_thd_kill,
key_LOCK_user_conn, key_LOG_LOCK_log,
key_master_info_data_lock, key_master_info_run_lock,
@@ -740,7 +740,6 @@ extern const char *mysql_real_data_home_ptr;
extern ulong thread_handling;
extern "C" MYSQL_PLUGIN_IMPORT char server_version[SERVER_VERSION_LENGTH];
extern char *server_version_ptr;
-extern bool using_custom_server_version;
extern MYSQL_PLUGIN_IMPORT char mysql_real_data_home[];
extern char mysql_unpacked_real_data_home[];
extern MYSQL_PLUGIN_IMPORT struct system_variables global_system_variables;
@@ -760,7 +759,8 @@ extern mysql_mutex_t
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_active_mi, LOCK_manager, LOCK_user_conn,
- LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_backup_log;
+ LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_backup_log,
+ LOCK_optimizer_costs;
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_global_system_variables;
extern mysql_rwlock_t LOCK_all_status_vars;
extern mysql_mutex_t LOCK_start_thread;
@@ -795,6 +795,18 @@ enum options_mysqld
OPT_BINLOG_IGNORE_DB,
OPT_BIN_LOG,
OPT_BOOTSTRAP,
+ OPT_COSTS_DISK_READ_COST,
+ OPT_COSTS_INDEX_BLOCK_COPY_COST,
+ OPT_COSTS_KEY_CMP_COST,
+ OPT_COSTS_KEY_COPY_COST,
+ OPT_COSTS_KEY_LOOKUP_COST,
+ OPT_COSTS_KEY_NEXT_FIND_COST,
+ OPT_COSTS_DISK_READ_RATIO,
+ OPT_COSTS_ROW_COPY_COST,
+ OPT_COSTS_ROW_LOOKUP_COST,
+ OPT_COSTS_ROW_NEXT_FIND_COST,
+ OPT_COSTS_ROWID_CMP_COST,
+ OPT_COSTS_ROWID_COPY_COST,
OPT_EXPIRE_LOGS_DAYS,
OPT_BINLOG_EXPIRE_LOGS_SECONDS,
OPT_CONSOLE,
diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc
index 6a24fa95b68..51aa70fa02c 100644
--- a/sql/opt_index_cond_pushdown.cc
+++ b/sql/opt_index_cond_pushdown.cc
@@ -17,6 +17,7 @@
#include "mariadb.h"
#include "sql_select.h"
#include "sql_test.h"
+#include "opt_trace.h"
/****************************************************************************
* Index Condition Pushdown code starts
@@ -334,13 +335,12 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
than on a non-clustered key. This restriction should be
re-evaluated when WL#6061 is implemented.
*/
- if ((tab->table->file->index_flags(keyno, 0, 1) &
- HA_DO_INDEX_COND_PUSHDOWN) &&
+ if ((tab->table->key_info[keyno].index_flags & HA_DO_INDEX_COND_PUSHDOWN) &&
optimizer_flag(tab->join->thd, OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN) &&
tab->join->thd->lex->sql_command != SQLCOM_UPDATE_MULTI &&
tab->join->thd->lex->sql_command != SQLCOM_DELETE_MULTI &&
tab->type != JT_CONST && tab->type != JT_SYSTEM &&
- !tab->table->file->is_clustering_key(keyno)) // 6
+ !tab->table->is_clustering_key(keyno)) // 6
{
DBUG_EXECUTE("where",
print_where(tab->select_cond, "full cond", QT_ORDINARY););
@@ -355,6 +355,8 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
{
Item *idx_remainder_cond= 0;
tab->pre_idx_push_select_cond= tab->select_cond;
+ Json_writer_object trace(tab->join->thd);
+ trace.add_table_name(tab);
/*
For BKA cache we store condition to special BKA cache field
because evaluation of the condition requires additional operations
@@ -387,6 +389,7 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
idx_remainder_cond= NULL;
}
}
+ trace.add("index_condition", idx_cond);
/*
Disable eq_ref's "lookup cache" if we've pushed down an index
@@ -424,6 +427,10 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
}
else
tab->select_cond= idx_remainder_cond;
+
+ if (tab->select_cond)
+ trace.add("row_condition", tab->select_cond);
+
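
The added trace keys make visible how index condition pushdown splits the WHERE clause: the part that only touches indexed columns becomes the "index_condition" evaluated inside the engine, and whatever remains is kept as the "row_condition" checked after the row is fetched. A toy illustration of that split, using plain lambdas over a made-up row type instead of the Item machinery:

#include <cstdio>
#include <functional>

// Toy row with an indexed column 'a' and a non-indexed column 'b'.
struct Row { int a, b; };

int main()
{
  // Full WHERE: a > 10 AND b = 5. The indexed part can be checked while
  // scanning the index; only the remainder needs the full row.
  std::function<bool(const Row&)> index_condition=
      [](const Row &r) { return r.a > 10; };
  std::function<bool(const Row&)> row_condition=
      [](const Row &r) { return r.b == 5; };

  Row r= {42, 5};
  std::printf("row passes: %d\n", index_condition(r) && row_condition(r));
  return 0;
}
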
if (tab->select)
{
DBUG_EXECUTE("where",
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 86bd5663623..02b5770aa8c 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -19,8 +19,8 @@
Fix that MAYBE_KEY are stored in the tree so that we can detect use
of full hash keys for queries like:
- select s.id, kws.keyword_id from sites as s,kws where s.id=kws.site_id and kws.keyword_id in (204,205);
-
+ select s.id, kws.keyword_id from sites as s,kws where s.id=kws.site_id and
+ kws.keyword_id in (204,205);
*/
/*
@@ -345,7 +345,8 @@ struct st_index_scan_info;
struct st_ror_scan_info;
static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
-static ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
+static ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit,
+ bool index_only,
SEL_ARG *tree, bool update_tbl_stats,
uint *mrr_flags, uint *bufsize,
Cost_estimate *cost, bool *is_ror_scan);
@@ -356,7 +357,8 @@ QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index,
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool index_read_must_be_used,
bool for_range_access,
- double read_time);
+ double read_time, ha_rows limit,
+ bool using_table_scan);
static
TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
double read_time);
@@ -370,7 +372,9 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
double read_time);
static
TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
- double read_time, bool named_trace= false);
+ double read_time, ha_rows limit,
+ bool named_trace,
+ bool using_table_scan);
static
TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
TRP_INDEX_MERGE *imerge_trp,
@@ -412,7 +416,6 @@ static bool eq_tree(SEL_ARG* a,SEL_ARG *b);
SEL_ARG null_element(SEL_ARG::IMPOSSIBLE);
static bool null_part_in_key(KEY_PART *key_part, const uchar *key,
uint length);
-static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
static
SEL_ARG *enforce_sel_arg_weight_limit(RANGE_OPT_PARAM *param, uint keyno,
@@ -2171,6 +2174,27 @@ int SEL_ARG::sel_cmp(Field *field, uchar *a, uchar *b, uint8 a_flag,
}
+/*
+ Check if the min and max values are equal
+
+ @return 1 if equal
+*/
+
+bool SEL_ARG::min_max_are_equal() const
+{
+ uint offset= 0;
+ if (field->real_maybe_null()) // If null is part of key
+ {
+ if (*min_value != *max_value)
+ return 0;
+ if (*min_value)
+ return 1; // Both values are NULL, so they are equal
+ offset= 1; // Skip NULL marker
+ }
+ return field->key_cmp(min_value+offset, max_value+offset) == 0;
+}
+
+
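
min_max_are_equal() compares the stored min/max key images and has to skip the leading NULL-indicator byte that a nullable field adds to the key format. A minimal byte-level illustration of that layout, assuming the usual one-byte NULL marker and using memcmp() in place of Field::key_cmp():

#include <cstdio>
#include <cstring>

// Key image for a nullable column: one NULL-marker byte, then the value bytes.
static bool min_max_equal(const unsigned char *min_key,
                          const unsigned char *max_key,
                          size_t value_len, bool nullable)
{
  size_t offset= 0;
  if (nullable)
  {
    if (min_key[0] != max_key[0])
      return false;               // one bound is NULL, the other is not
    if (min_key[0])
      return true;                // both NULL: equal, value bytes irrelevant
    offset= 1;                    // skip the NULL marker
  }
  return std::memcmp(min_key + offset, max_key + offset, value_len) == 0;
}

int main()
{
  unsigned char a[]= {0, 42, 0, 0, 0};   // not NULL, value 42
  unsigned char b[]= {0, 42, 0, 0, 0};
  std::printf("equal: %d\n", min_max_equal(a, b, 4, true));
  return 0;
}
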
SEL_ARG *SEL_ARG::clone_tree(RANGE_OPT_PARAM *param)
{
SEL_ARG tmp_link,*next_arg,*root;
@@ -2294,9 +2318,11 @@ void TRP_RANGE::trace_basic_info(PARAM *param,
const KEY &cur_key= param->table->key_info[keynr_in_table];
const KEY_PART_INFO *key_part= cur_key.key_part;
- trace_object->add("type", "range_scan")
- .add("index", cur_key.name)
- .add("rows", records);
+ if (unlikely(trace_object->trace_started()))
+ trace_object->
+ add("type", "range_scan").
+ add("index", cur_key.name).
+ add("rows", records);
Json_writer_array trace_range(param->thd, "ranges");
@@ -2323,6 +2349,7 @@ public:
struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */
bool is_covering; /* TRUE if no row retrieval phase is necessary */
double index_scan_costs; /* SUM(cost(index_scan)) */
+ double cmp_cost; // Cost of checking the output rows against the WHERE clause
void trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const;
};
@@ -2439,20 +2466,21 @@ void TRP_INDEX_MERGE::trace_basic_info(PARAM *param,
class TRP_GROUP_MIN_MAX : public TABLE_READ_PLAN
{
private:
- bool have_min, have_max, have_agg_distinct;
- KEY_PART_INFO *min_max_arg_part;
uint group_prefix_len;
uint used_key_parts;
uint group_key_parts;
- KEY *index_info;
uint index;
uint key_infix_len;
+ uint param_idx; /* Index of used key in param->key. */
uchar key_infix[MAX_KEY_LENGTH];
+ KEY *index_info;
+ KEY_PART_INFO *min_max_arg_part;
SEL_TREE *range_tree; /* Represents all range predicates in the query. */
SEL_ARG *index_tree; /* The SEL_ARG sub-tree corresponding to index_info. */
- uint param_idx; /* Index of used key in param->key. */
- bool is_index_scan; /* Use index_next() instead of random read */
+ bool have_min, have_max;
public:
+ bool have_agg_distinct;
+ bool is_index_scan; /* Use index_next() instead of random read */
/* Number of records selected by the ranges in index_tree. */
ha_rows quick_prefix_records;
public:
@@ -2465,13 +2493,14 @@ public:
uchar *key_infix_arg,
SEL_TREE *tree_arg, SEL_ARG *index_tree_arg,
uint param_idx_arg, ha_rows quick_prefix_records_arg)
- : have_min(have_min_arg), have_max(have_max_arg),
+ : group_prefix_len(group_prefix_len_arg), used_key_parts(used_key_parts_arg),
+ group_key_parts(group_key_parts_arg),
+ index(index_arg), key_infix_len(key_infix_len_arg), param_idx(param_idx_arg),
+ index_info(index_info_arg),min_max_arg_part(min_max_arg_part_arg),
+ range_tree(tree_arg), index_tree(index_tree_arg),
+ have_min(have_min_arg), have_max(have_max_arg),
have_agg_distinct(have_agg_distinct_arg),
- min_max_arg_part(min_max_arg_part_arg),
- group_prefix_len(group_prefix_len_arg), used_key_parts(used_key_parts_arg),
- group_key_parts(group_key_parts_arg), index_info(index_info_arg),
- index(index_arg), key_infix_len(key_infix_len_arg), range_tree(tree_arg),
- index_tree(index_tree_arg), param_idx(param_idx_arg), is_index_scan(FALSE),
+ is_index_scan(FALSE),
quick_prefix_records(quick_prefix_records_arg)
{
if (key_infix_len)
@@ -2500,11 +2529,13 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(PARAM *param,
else
trace_object->add_null("min_max_arg");
- trace_object->add("min_aggregate", have_min)
- .add("max_aggregate", have_max)
- .add("distinct_aggregate", have_agg_distinct)
- .add("rows", records)
- .add("cost", read_cost);
+ if (unlikely(trace_object->trace_started()))
+ trace_object->
+ add("min_aggregate", have_min).
+ add("max_aggregate", have_max).
+ add("distinct_aggregate", have_agg_distinct).
+ add("rows", records).
+ add("cost", read_cost);
const KEY_PART_INFO *key_part= index_info->key_part;
{
@@ -2626,7 +2657,8 @@ static int fill_used_fields_bitmap(PARAM *param)
In the table struct the following information is updated:
quick_keys - Which keys can be used
quick_rows - How many rows the key matches
- opt_range_condition_rows - E(# rows that will satisfy the table condition)
+ opt_range_condition_rows - E(# rows that will satisfy the table
+ condition)
IMPLEMENTATION
opt_range_condition_rows value is obtained as follows:
@@ -2679,15 +2711,19 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
bool only_single_index_range_scan)
{
uint idx;
- double scan_time;
Item *notnull_cond= NULL;
TABLE_READ_PLAN *best_trp= NULL;
SEL_ARG **backup_keys= 0;
+ ha_rows table_records= head->stat_records();
+ handler *file= head->file;
+ bool impossible_range= 0;
DBUG_ENTER("SQL_SELECT::test_quick_select");
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
(ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables,
(ulong) const_tables));
- DBUG_PRINT("info", ("records: %lu", (ulong) head->stat_records()));
+ DBUG_PRINT("info", ("records: %llu", (ulonglong) table_records));
+ DBUG_ASSERT(table_records || !head->file->stats.records);
+
delete quick;
quick=0;
needed_reg.clear_all();
@@ -2696,40 +2732,29 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_ASSERT(!head->is_filled_at_execution());
if (keys_to_use.is_clear_all() || head->is_filled_at_execution())
DBUG_RETURN(0);
- records= head->stat_records();
+ records= table_records;
notnull_cond= head->notnull_cond;
- if (!records)
- records++; /* purecov: inspected */
- if (head->file->ha_table_flags() & HA_NON_COMPARABLE_ROWID)
+ if (file->ha_table_flags() & HA_NON_COMPARABLE_ROWID)
only_single_index_range_scan= 1;
if (head->force_index || force_quick_range)
- scan_time= read_time= DBL_MAX;
+ read_time= DBL_MAX;
else
{
- scan_time= rows2double(records) / TIME_FOR_COMPARE;
- /*
- The 2 is there to prefer range scans to full table scans.
- This is mainly to make the test suite happy as many tests has
- very few rows. In real life tables has more than a few rows and the
- +2 has no practical effect.
- */
- read_time= (double) head->file->scan_time() + scan_time + 2;
- if (limit < records && read_time < (double) records + scan_time + 1 )
- {
- read_time= (double) records + scan_time + 1; // Force to use index
+ read_time= file->cost(file->ha_scan_and_compare_time(records));
+ if (limit < records)
notnull_cond= NULL;
- }
}
possible_keys.clear_all();
DBUG_PRINT("info",("Time to scan table: %g", read_time));
- Json_writer_object table_records(thd);
- table_records.add_table_name(head);
+ Json_writer_object table_info(thd);
+ table_info.add_table_name(head);
Json_writer_object trace_range(thd, "range_analysis");
+ if (unlikely(thd->trace_started()) && read_time != DBL_MAX)
{
Json_writer_object table_rec(thd, "table_scan");
table_rec.add("rows", records).add("cost", read_time);
@@ -2745,14 +2770,14 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
KEY_PART *key_parts;
KEY *key_info;
PARAM param;
- bool force_group_by = false;
+ bool force_group_by= false, group_by_optimization_used= false;
if (check_stack_overrun(thd, 2*STACK_MIN_SIZE + sizeof(PARAM), buff))
DBUG_RETURN(0); // Fatal error flag is set
/* set up parameter that is passed to all functions */
param.thd= thd;
- param.baseflag= head->file->ha_table_flags();
+ param.baseflag= file->ha_table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@@ -2771,7 +2796,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
thd->no_errors=1; // Don't warn about NULL
init_sql_alloc(key_memory_quick_range_select_root, &alloc,
- thd->variables.range_alloc_block_size, 0, MYF(MY_THREAD_SPECIFIC));
+ thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
if (!(param.key_parts=
(KEY_PART*) alloc_root(&alloc,
sizeof(KEY_PART) *
@@ -2802,8 +2828,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (!keys_to_use.is_set(idx))
{
- trace_idx_details.add("usable", false)
- .add("cause", "not applicable");
+ if (unlikely(trace_idx_details.trace_started()))
+ trace_idx_details.
+ add("usable", false).
+ add("cause", "not applicable");
continue;
}
if (key_info->flags & HA_FULLTEXT)
@@ -2853,11 +2881,14 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
thd->mem_root= &alloc;
/* Calculate cost of full index read for the shortest covering index */
- if (!force_quick_range && !head->covering_keys.is_clear_all())
+ if (!force_quick_range && !head->covering_keys.is_clear_all() &&
+ !head->no_keyread)
{
- int key_for_use= find_shortest_key(head, &head->covering_keys);
- double key_read_time= (head->file->key_scan_time(key_for_use) +
- rows2double(records) / TIME_FOR_COMPARE);
+ double key_read_time;
+ uint key_for_use= find_shortest_key(head, &head->covering_keys);
+ key_read_time= file->cost(file->
+ ha_key_scan_and_compare_time(key_for_use,
+ records));
DBUG_PRINT("info", ("'all'+'using index' scan will be using key %d, "
"read time %g", key_for_use, key_read_time));
@@ -2868,10 +2899,14 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
read_time= key_read_time;
chosen= TRUE;
}
- trace_cov.add("index", head->key_info[key_for_use].name)
- .add("cost", key_read_time).add("chosen", chosen);
- if (!chosen)
- trace_cov.add("cause", "cost");
+ if (unlikely(trace_cov.trace_started()))
+ {
+ trace_cov.
+ add("index", head->key_info[key_for_use].name).
+ add("cost", key_read_time).add("chosen", chosen);
+ if (!chosen)
+ trace_cov.add("cause", "cost");
+ }
}
double best_read_time= read_time;
@@ -2893,7 +2928,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
{
if (tree->type == SEL_TREE::IMPOSSIBLE)
{
- records=0L; /* Return -1 from this function. */
+ records= 0;
+ impossible_range= 1; /* Return -1 from this function. */
read_time= (double) HA_POS_ERROR;
trace_range.add("impossible_range", true);
goto free_mem;
@@ -2927,6 +2963,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
TRP_INDEX_INTERSECT *intersect_trp;
bool can_build_covering= FALSE;
Json_writer_object trace_range(thd, "analyzing_range_alternatives");
+ TABLE_READ_PLAN *range_trp;
backup_keys= (SEL_ARG**) alloca(sizeof(backup_keys[0])*param.keys);
memcpy(&backup_keys[0], &tree->keys[0],
@@ -2935,9 +2972,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
remove_nonrange_trees(&param, tree);
/* Get best 'range' plan and prepare data for making other plans */
- if (auto range_trp= get_key_scans_params(&param, tree,
- only_single_index_range_scan,
- true, best_read_time))
+ if ((range_trp= get_key_scans_params(&param, tree,
+ only_single_index_range_scan,
+ true, best_read_time, limit,
+ 1)))
{
best_trp= range_trp;
best_read_time= best_trp->read_cost;
@@ -2959,8 +2997,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if ((rori_trp= get_best_ror_intersect(&param, tree, best_read_time,
&can_build_covering)))
{
- best_trp= rori_trp;
- best_read_time= best_trp->read_cost;
+ best_trp= rori_trp;
+ best_read_time= rori_trp->read_cost;
/*
Try constructing covering ROR-intersect only if it looks possible
and worth doing.
@@ -2984,15 +3022,14 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if ((intersect_trp= get_best_index_intersect(&param, tree,
best_read_time)))
{
- best_trp= intersect_trp;
- best_read_time= best_trp->read_cost;
- set_if_smaller(param.table->opt_range_condition_rows,
- intersect_trp->records);
+ best_trp= intersect_trp;
+ best_read_time= intersect_trp->read_cost;
+ param.table->set_opt_range_condition_rows(intersect_trp->records);
}
}
if (optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE) &&
- head->stat_records() != 0 && !only_single_index_range_scan)
+ table_records != 0 && !only_single_index_range_scan)
{
/* Try creating index_merge/ROR-union scan. */
SEL_IMERGE *imerge;
@@ -3004,10 +3041,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Json_writer_array trace_idx_merge(thd, "analyzing_index_merge_union");
while ((imerge= it++))
{
- new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
+ new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time,
+ limit, 0, 1);
if (new_conj_trp)
- set_if_smaller(param.table->opt_range_condition_rows,
- new_conj_trp->records);
+ param.table->set_opt_range_condition_rows(new_conj_trp->records);
if (new_conj_trp &&
(!best_conj_trp ||
new_conj_trp->read_cost < best_conj_trp->read_cost))
@@ -3029,22 +3066,51 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (!only_single_index_range_scan)
{
TRP_GROUP_MIN_MAX *group_trp;
+ double duplicate_removal_cost= 0;
if (tree)
restore_nonrange_trees(&param, tree, backup_keys);
if ((group_trp= get_best_group_min_max(&param, tree, read_time)))
{
- param.table->opt_range_condition_rows= MY_MIN(group_trp->records,
- head->stat_records());
+ /* mark that we are changing opt_range_condition_rows */
+ group_by_optimization_used= 1;
+ param.table->set_opt_range_condition_rows(group_trp->records);
+ DBUG_PRINT("info", ("table_rows: %llu opt_range_condition_rows: %llu "
+ "group_trp->records: %ull",
+ table_records,
+ param.table->opt_range_condition_rows,
+ group_trp->records));
+
Json_writer_object grp_summary(thd, "best_group_range_summary");
if (unlikely(thd->trace_started()))
group_trp->trace_basic_info(&param, &grp_summary);
- if (group_trp->read_cost < best_read_time || force_group_by)
+ if (group_trp->have_agg_distinct && group_trp->is_index_scan)
+ {
+ /*
+ We are optimizing a distinct aggregate, like
+ SELECT count(DISTINCT a,b,c) FROM ...
+
+ The group cost includes removal of the duplicates, so to be
+ able to compare costs we add a small cost to the original cost
+ that stands for the extra work we would have to do outside of
+ the engine to eliminate duplicates for each output row if
+ we were not using the grouping code.
+ */
+ duplicate_removal_cost= (DUPLICATE_REMOVAL_COST *
+ (best_trp ? best_trp->records :
+ table_records));
+ }
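For intuition, a standalone sketch of the comparison this sets up; DUPLICATE_REMOVAL_COST and every number below are invented stand-ins, not the server's actual cost constants:

  #include <cstdio>

  int main()
  {
    // Illustrative values only; the real numbers come from the optimizer.
    const double DUPLICATE_REMOVAL_COST= 1e-4;   // stand-in cost per row
    double best_read_time= 120.0;                // cost of the best non-group plan
    double best_rows= 50000;                     // rows that plan would return
    double group_read_cost= 122.0;               // cost of the group-min-max plan

    // The group plan already removes duplicates, so the competing plan is
    // penalized by the work needed to de-duplicate its output rows.
    double duplicate_removal_cost= DUPLICATE_REMOVAL_COST * best_rows;
    bool choose_group= group_read_cost < best_read_time + duplicate_removal_cost;
    printf("group plan chosen: %s\n", choose_group ? "yes" : "no");
    return 0;
  }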
+ if (group_trp->read_cost < best_read_time + duplicate_removal_cost ||
+ force_group_by)
{
- grp_summary.add("chosen", true);
+ if (thd->trace_started())
+ {
+ if (duplicate_removal_cost)
+ grp_summary.add("duplicate_removal_cost", duplicate_removal_cost);
+ grp_summary.add("chosen", true);
+ }
best_trp= group_trp;
- best_read_time= best_trp->read_cost;
}
else
grp_summary.add("chosen", false).add("cause", "cost");
@@ -3059,11 +3125,14 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (best_trp)
{
records= best_trp->records;
+ impossible_range= records == 0; // No matching rows
if (!(quick= best_trp->make_quick(&param, TRUE)) || quick->init())
{
delete quick;
quick= NULL;
}
+ else
+ quick->group_by_optimization_used= group_by_optimization_used;
}
possible_keys= param.possible_keys;
@@ -3076,9 +3145,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Json_writer_object trace_range_plan(thd, "range_access_plan");
best_trp->trace_basic_info(&param, &trace_range_plan);
}
- trace_range_summary.add("rows_for_plan", quick->records)
- .add("cost_for_plan", quick->read_time)
- .add("chosen", true);
+ trace_range_summary.
+ add("rows_for_plan", quick->records).
+ add("cost_for_plan", quick->read_time).
+ add("chosen", true);
}
free_root(&alloc,MYF(0)); // Return memory & allocator
@@ -3092,7 +3162,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Assume that if the user is using 'limit' we will only need to scan
limit rows if we are using a key
*/
- DBUG_RETURN(records ? MY_TEST(quick) : -1);
+ set_if_smaller(records, table_records);
+ DBUG_RETURN(impossible_range ? -1 : MY_TEST(quick));
}
/****************************************************************************
@@ -3106,7 +3177,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
SYNOPSIS
create_key_parts_for_pseudo_indexes()
param IN/OUT data structure for the descriptors to be built
- used_fields bitmap of columns for which the descriptors are to be built
+ used_fields bitmap of columns for which the descriptors are to be built
DESCRIPTION
For each column marked in the bitmap used_fields the function builds
@@ -3203,9 +3274,10 @@ bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param,
SYNOPSIS
records_in_column_ranges()
param the data structure to access descriptors of pseudo indexes
- built over columns used in the condition of the processed query
+ built over columns used in the condition of the processed
+ query
idx the index of the descriptor of interest in param
- tree the tree representing ranges built for the interesting column
+ tree the tree representing ranges built for the interesting column
DESCRIPTION
This function retrieves the ranges represented by the SEL_ARG 'tree' and
@@ -3229,7 +3301,7 @@ double records_in_column_ranges(PARAM *param, uint idx,
SEL_ARG_RANGE_SEQ seq;
KEY_MULTI_RANGE range;
range_seq_t seq_it;
- double rows;
+ double rows, table_records;
Field *field;
uint flags= 0;
double total_rows= 0;
@@ -3284,13 +3356,17 @@ double records_in_column_ranges(PARAM *param, uint idx,
total_rows= DBL_MAX;
break;
}
- total_rows += rows;
+ total_rows+= rows;
}
if (total_rows == 0)
total_rows= MY_MIN(1, rows2double(param->table->stat_records()));
- return total_rows;
-}
+ table_records= rows2double(param->table->stat_records());
+ if (total_rows > table_records)
+ DBUG_PRINT("error", ("table_records: %g < total_records: %g",
+ table_records, total_rows));
+ return MY_MIN(total_rows, table_records);
+}
/*
@@ -3303,12 +3379,12 @@ double records_in_column_ranges(PARAM *param, uint idx,
*/
static
-int cmp_quick_ranges(TABLE *table, uint *a, uint *b)
+int cmp_quick_ranges(TABLE::OPT_RANGE **a, TABLE::OPT_RANGE **b)
{
- int tmp= CMP_NUM(table->opt_range[*a].rows, table->opt_range[*b].rows);
+ int tmp= CMP_NUM((*a)->rows, (*b)->rows);
if (tmp)
return tmp;
- return -CMP_NUM(table->opt_range[*a].key_parts, table->opt_range[*b].key_parts);
+ return -CMP_NUM((*a)->key_parts, (*b)->key_parts);
}
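The comparator orders ranges by ascending row estimate and, on ties, by descending number of key parts; a small standalone illustration of that ordering (simplified struct, not the real TABLE::OPT_RANGE):

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  struct Opt_range { double rows; unsigned key_parts; };

  static bool before(const Opt_range &a, const Opt_range &b)
  {
    if (a.rows != b.rows)
      return a.rows < b.rows;            // fewer estimated rows first
    return a.key_parts > b.key_parts;    // then longer key prefixes first
  }

  int main()
  {
    std::vector<Opt_range> v= { {100, 1}, {10, 2}, {10, 3} };
    std::sort(v.begin(), v.end(), before);
    for (const Opt_range &r : v)
      printf("(rows=%g, key_parts=%u) ", r.rows, r.key_parts);
    printf("\n");                        // (10,3) (10,2) (100,1)
    return 0;
  }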
@@ -3324,7 +3400,8 @@ int cmp_quick_ranges(TABLE *table, uint *a, uint *b)
DESCRIPTION
This function calculates the selectivity of range conditions cond imposed
on the rows of 'table' in the processed query.
- The calculated selectivity is assigned to the field table->cond_selectivity.
+ The calculated selectivity is assigned to the field
+ table->cond_selectivity.
Selectivity is calculated as a product of selectivities imposed by:
@@ -3336,6 +3413,8 @@ int cmp_quick_ranges(TABLE *table, uint *a, uint *b)
3. Reading a few records from the table pages and checking the condition
selectivity (this is used for conditions like "column LIKE '%val%'"
where approaches #1 and #2 do not provide selectivity data).
+ 4. If the selectivity calculated by get_best_ror_intersect() is smaller,
+ use this instead.
NOTE
Currently the selectivities of range conditions over different columns are
@@ -3350,31 +3429,40 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
{
uint keynr, range_index, ranges;
MY_BITMAP *used_fields= &table->cond_set;
- double table_records= (double)table->stat_records();
- uint optimal_key_order[MAX_KEY];
+ double table_records= (double)table->stat_records(), original_selectivity;
+ TABLE::OPT_RANGE *optimal_key_order[MAX_KEY];
+ MY_BITMAP handled_columns;
+ my_bitmap_map *buf;
+ QUICK_SELECT_I *quick;
DBUG_ENTER("calculate_cond_selectivity_for_table");
- table->cond_selectivity= 1.0;
+ table->set_cond_selectivity(1.0);
if (table_records == 0)
DBUG_RETURN(FALSE);
- QUICK_SELECT_I *quick;
if ((quick=table->reginfo.join_tab->quick) &&
quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
{
- table->cond_selectivity*= (quick->records/table_records);
+ DBUG_ASSERT(table->opt_range_condition_rows <= quick->records);
+ table->set_cond_selectivity(MY_MIN(quick->records,
+ table->opt_range_condition_rows)/
+ table_records);
DBUG_RETURN(FALSE);
}
- if (!*cond)
+ if (!*cond || table->pos_in_table_list->schema_table)
+ {
+ table->set_cond_selectivity(table->opt_range_condition_rows /
+ table_records);
DBUG_RETURN(FALSE);
+ }
- if (table->pos_in_table_list->schema_table)
- DBUG_RETURN(FALSE);
-
- MY_BITMAP handled_columns;
- my_bitmap_map* buf;
+ /*
+ This should be pre-allocated so that we could use the same bitmap for all
+ tables. It would also avoid extra memory allocations if this function
+ were called multiple times per query.
+ */
if (!(buf= (my_bitmap_map*)thd->alloc(table->s->column_bitmap_size)))
DBUG_RETURN(TRUE);
my_bitmap_init(&handled_columns, buf, table->s->fields);
@@ -3395,93 +3483,133 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
*/
for (ranges= keynr= 0 ; keynr < table->s->keys; keynr++)
if (table->opt_range_keys.is_set(keynr))
- optimal_key_order[ranges++]= keynr;
+ optimal_key_order[ranges++]= table->opt_range + keynr;
- my_qsort2(optimal_key_order, ranges,
- sizeof(optimal_key_order[0]),
- (qsort2_cmp) cmp_quick_ranges, table);
+ my_qsort(optimal_key_order, ranges,
+ sizeof(optimal_key_order[0]),
+ (qsort_cmp) cmp_quick_ranges);
for (range_index= 0 ; range_index < ranges ; range_index++)
{
- uint keynr= optimal_key_order[range_index];
+ TABLE::OPT_RANGE *range= optimal_key_order[range_index];
+ uint keynr= (uint)(range - table->opt_range);
+ uint i;
+ uint used_key_parts= range->key_parts;
+ double quick_cond_selectivity= (range->rows / table_records);
+ KEY *key_info= table->key_info + keynr;
+ KEY_PART_INFO* key_part= key_info->key_part;
+ DBUG_ASSERT(quick_cond_selectivity <= 1.0);
+
+ /*
+ Suppose there are range conditions on these keys
+ KEY1 (col1, col2)
+ KEY2 (col2, col6)
+ KEY3 (col3, col2)
+ KEY4 (col4, col5)
+
+ We don't want to count selectivity for ranges that use a column
+ that was used before.
+ If the first column of an index was not used before, we can use the
+ key part statistics to calculate selectivity for this column. We cannot
+ calculate statistics for any other columns as the key part statistics
+ also depend on the values of the previous key parts, not only on
+ the last key part.
+
+ In other words, if KEY1 has the smallest range, we will only use the first
+ part of KEY3 and the range of KEY4 to calculate selectivity.
+ */
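A standalone sketch of the selectivity accumulation described above, simplified to "skip a range whose first column was already handled"; the column names and row counts are invented:

  #include <cstdio>
  #include <set>
  #include <string>
  #include <vector>

  int main()
  {
    // Invented ranges: {first column of the index, estimated matching rows}.
    struct Range { std::string first_col; double rows; };
    std::vector<Range> ranges= { {"col1", 10}, {"col2", 200},
                                 {"col3", 50}, {"col1", 5} };
    double table_records= 1000.0;
    double cond_selectivity= 1.0;
    std::set<std::string> handled;

    for (const Range &r : ranges)
    {
      if (!handled.insert(r.first_col).second)
        continue;                            // column already accounted for
      cond_selectivity*= r.rows / table_records;
    }
    // 0.01 * 0.2 * 0.05 = 1e-4; the second range on col1 is skipped.
    printf("combined selectivity: %g\n", cond_selectivity);
    return 0;
  }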
+ for (i= 0; i < used_key_parts; i++)
{
+ if (bitmap_is_set(&handled_columns, key_part[i].fieldnr-1))
{
- uint i;
- uint used_key_parts= table->opt_range[keynr].key_parts;
- double quick_cond_selectivity= (table->opt_range[keynr].rows /
- table_records);
- KEY *key_info= table->key_info + keynr;
- KEY_PART_INFO* key_part= key_info->key_part;
- /*
- Suppose, there are range conditions on two keys
- KEY1 (col1, col2)
- KEY2 (col3, col2)
-
- we don't want to count selectivity of condition on col2 twice.
-
- First, find the longest key prefix that's made of columns whose
- selectivity wasn't already accounted for.
- */
- for (i= 0; i < used_key_parts; i++, key_part++)
+ double rec_per_key;
+ if (!i)
{
- if (bitmap_is_set(&handled_columns, key_part->fieldnr-1))
- break;
- bitmap_set_bit(&handled_columns, key_part->fieldnr-1);
- }
- if (i)
- {
- double UNINIT_VAR(selectivity_mult);
-
- /*
- There is at least 1-column prefix of columns whose selectivity has
- not yet been accounted for.
- */
- table->cond_selectivity*= quick_cond_selectivity;
- Json_writer_object selectivity_for_index(thd);
- selectivity_for_index.add("index_name", key_info->name)
- .add("selectivity_from_index",
- quick_cond_selectivity);
- if (i != used_key_parts)
- {
- /*
- Range access got us estimate for #used_key_parts.
- We need estimate for #(i-1) key parts.
- */
- double f1= key_info->actual_rec_per_key(i-1);
- double f2= key_info->actual_rec_per_key(i);
- if (f1 > 0 && f2 > 0)
- selectivity_mult= f1 / f2;
- else
- {
- /*
- No statistics available, assume the selectivity is proportional
- to the number of key parts.
- (i=0 means 1 keypart, i=1 means 2 keyparts, so use i+1)
- */
- selectivity_mult= ((double)(i+1)) / i;
- }
- table->cond_selectivity*= selectivity_mult;
- selectivity_for_index.add("selectivity_multiplier",
- selectivity_mult);
- }
/*
- We need to set selectivity for fields supported by indexes.
- For single-component indexes and for some first components
- of other indexes we do it here. For the remaining fields
- we do it later in this function, in the same way as for the
- fields not used in any indexes.
- */
- if (i == 1)
- {
- uint fieldnr= key_info->key_part[0].fieldnr;
- table->field[fieldnr-1]->cond_selectivity= quick_cond_selectivity;
- if (i != used_key_parts)
- table->field[fieldnr-1]->cond_selectivity*= selectivity_mult;
- bitmap_clear_bit(used_fields, fieldnr-1);
- }
+ We cannot use this key part for selectivity calculation as
+ key_info->actual_rec_per_key for later key parts depends on the
+ distribution of the previous key parts.
+ */
+ goto end_of_range_loop;
}
+ /*
+ A later key part was already used. We can still use key
+ statistics for the first key part to get some approximation
+ of the selectivity of this key. This can be done if the
+ first key part is a constant:
+ WHERE key1_part1=1 and key2_part1=5 and key2_part2 BETWEEN 0 and 10
+ Even if key1 is used and it also includes the field for key2_part1
+ as a key part, we can still use the selectivity of key2_part1.
+ */
+ if ((rec_per_key= key_info->actual_rec_per_key(0)) == 0.0 ||
+ !range->first_key_part_has_only_one_value)
+ goto end_of_range_loop;
+ /*
+ Use key distribution statistics, except if range selectivity
+ is bigger. This can happen if the used key value has more
+ than an average number of instances.
+ */
+ set_if_smaller(rec_per_key, rows2double(table->file->stats.records));
+ set_if_bigger(quick_cond_selectivity,
+ rec_per_key / table->file->stats.records);
+ used_key_parts= 1;
+ break;
}
}
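A sketch of the fallback just applied: when only the first key part is usable, the selectivity becomes the larger of the range estimate and the rec_per_key-based estimate (clamped to the table size). The numbers are invented:

  #include <algorithm>
  #include <cstdio>

  int main()
  {
    double table_rows= 100000;
    double range_rows= 150;          // rows estimated for the whole range
    double rec_per_key= 40;          // avg rows per distinct first-part value
    double quick_cond_selectivity= range_rows / table_rows;

    rec_per_key= std::min(rec_per_key, table_rows);
    quick_cond_selectivity= std::max(quick_cond_selectivity,
                                     rec_per_key / table_rows);
    printf("selectivity: %g\n", quick_cond_selectivity);  // max(0.0015, 0.0004)
    return 0;
  }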
+ /* Set bits only after we have checked the used columns */
+ for (i= 0; i < used_key_parts; i++, key_part++)
+ bitmap_set_bit(&handled_columns, key_part->fieldnr-1);
+
+ /*
+ There is at least a 1-column prefix of columns whose selectivity has
+ not yet been accounted for.
+ */
+ table->multiply_cond_selectivity(quick_cond_selectivity);
+
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object selectivity_for_index(thd);
+ selectivity_for_index.
+ add("index_name", key_info->name).
+ add("selectivity_from_index", quick_cond_selectivity);
+ }
+ /*
+ We need to set selectivity for fields supported by indexes.
+ For single-component indexes and for some first components
+ of other indexes we do it here. For the remaining fields
+ we do it later in this function, in the same way as for the
+ fields not used in any indexes.
+ */
+ if (used_key_parts == 1)
+ {
+ uint fieldnr= key_info->key_part[0].fieldnr;
+ table->field[fieldnr-1]->cond_selectivity= quick_cond_selectivity;
+ DBUG_ASSERT(table->field[fieldnr-1]->cond_selectivity <= 1.0);
+ /*
+ Reset bit in used_fields to ensure this field is ignored in the loop
+ below.
+ */
+ bitmap_clear_bit(used_fields, fieldnr-1);
+ }
+end_of_range_loop:
+ continue;
+ }
+ /*
+ Take into account the number of matching rows calculated by
+ get_best_ror_intersect() and stored in table->opt_range_condition_rows.
+ Use the smaller of the two selectivities.
+ */
+ original_selectivity= (table->opt_range_condition_rows /
+ table_records);
+ if (original_selectivity < table->cond_selectivity)
+ {
+ table->cond_selectivity= original_selectivity;
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object selectivity_for_index(thd);
+ selectivity_for_index.add("use_opt_range_condition_rows_selectivity",
+ original_selectivity);
+ }
}
selectivity_for_indexes.end();
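The adjustment above amounts to taking the smaller of the two selectivity estimates; a minimal illustration with invented numbers:

  #include <algorithm>
  #include <cstdio>

  int main()
  {
    double table_records= 10000, opt_range_condition_rows= 120;
    double cond_selectivity= 0.05;      // product accumulated from key stats
    double original_selectivity= opt_range_condition_rows / table_records;
    cond_selectivity= std::min(cond_selectivity, original_selectivity);
    printf("final selectivity: %g\n", cond_selectivity);   // 0.012
    return 0;
  }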
@@ -3502,7 +3630,8 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
double rows;
init_sql_alloc(key_memory_quick_range_select_root, &alloc,
- thd->variables.range_alloc_block_size, 0, MYF(MY_THREAD_SPECIFIC));
+ thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
param.thd= thd;
param.mem_root= &alloc;
param.old_root= thd->mem_root;
@@ -3521,9 +3650,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
thd->no_errors=1;
- tree= cond[0]->get_mm_tree(&param, cond);
-
- if (!tree)
+ if (!(tree= cond[0]->get_mm_tree(&param, cond)))
goto free_alloc;
table->reginfo.impossible_range= 0;
@@ -3547,7 +3674,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
for (uint idx= 0; idx < param.keys; idx++)
{
SEL_ARG *key= tree->keys[idx];
- if (key)
+ if (key) // Quick range found for key
{
Json_writer_object selectivity_for_column(thd);
selectivity_for_column.add("column_name", key->field->field_name);
@@ -3555,8 +3682,10 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
{
rows= 0;
table->reginfo.impossible_range= 1;
- selectivity_for_column.add("selectivity_from_histogram", rows);
- selectivity_for_column.add("cause", "impossible range");
+ if (unlikely(selectivity_for_column.trace_started()))
+ selectivity_for_column.
+ add("selectivity_from_histogram", rows).
+ add("cause", "impossible range");
goto free_alloc;
}
else
@@ -3565,6 +3694,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
if (rows != DBL_MAX)
{
key->field->cond_selectivity= rows/table_records;
+ DBUG_ASSERT(key->field->cond_selectivity <= 1.0);
selectivity_for_column.add("selectivity_from_histogram",
key->field->cond_selectivity);
}
@@ -3579,7 +3709,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
table_field->cond_selectivity < 1.0)
{
if (!bitmap_is_set(&handled_columns, table_field->field_index))
- table->cond_selectivity*= table_field->cond_selectivity;
+ table->multiply_cond_selectivity(table_field->cond_selectivity);
}
}
@@ -3591,12 +3721,6 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
}
selectivity_for_columns.end();
- if (quick && (quick->get_type() == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
- quick->get_type() == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE))
- {
- table->cond_selectivity*= (quick->records/table_records);
- }
-
bitmap_union(used_fields, &handled_columns);
/* Check if we can improve selectivity estimates by using sampling */
@@ -3634,7 +3758,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
DBUG_PRINT("info", ("The predicate selectivity : %g",
(double)stat->positive / examined_rows));
double selectivity= ((double)stat->positive) / examined_rows;
- table->cond_selectivity*= selectivity;
+ table->multiply_cond_selectivity(selectivity);
/*
If a field is involved then we register its selectivity in case
there in an equality with the field.
@@ -4073,6 +4197,20 @@ end:
table->all_partitions_pruned_away= true;
retval= TRUE;
}
+
+ if (unlikely(thd->trace_started()))
+ {
+ String parts;
+ String_list parts_list;
+
+ make_used_partitions_str(thd->mem_root, prune_param.part_info, &parts,
+ parts_list);
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_prune(thd, "prune_partitions");
+ trace_prune.add_table_name(table);
+ trace_prune.add("used_partitions", parts.c_ptr());
+ }
+
DBUG_RETURN(retval);
}
@@ -4985,17 +5123,33 @@ static void dbug_print_singlepoint_range(SEL_ARG **start, uint num)
Get cost of 'sweep' full records retrieval.
SYNOPSIS
get_sweep_read_cost()
- param Parameter from test_quick_select
- records # of records to be retrieved
+ param Parameter from test_quick_select
+ records # of records to be retrieved
+ add_time_for_compare If set, add cost of WHERE clause (WHERE_COST)
RETURN
cost of sweep
*/
-double get_sweep_read_cost(const PARAM *param, ha_rows records)
+static double get_sweep_read_cost(const PARAM *param, double records,
+ bool add_time_for_compare)
{
+ DBUG_ENTER("get_sweep_read_cost");
+#ifndef OLD_SWEEP_COST
+ handler *file= param->table->file;
+ IO_AND_CPU_COST engine_cost= file->ha_rnd_pos_call_time(double2rows(ceil(records)));
+ double cost;
+ if (add_time_for_compare)
+ {
+ engine_cost.cpu+= records * param->thd->variables.optimizer_where_cost;
+ }
+ cost= file->cost(engine_cost);
+
+ DBUG_PRINT("return", ("cost: %g", cost));
+ DBUG_RETURN(cost);
+#else
double result;
uint pk= param->table->s->primary_key;
- DBUG_ENTER("get_sweep_read_cost");
+
if (param->table->file->pk_is_clustering_key(pk) ||
param->table->file->stats.block_size == 0 /* HEAP */)
{
@@ -5003,12 +5157,12 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
We are using the primary key to find the rows.
Calculate the cost for this.
*/
- result= param->table->file->read_time(pk, (uint)records, records);
+ result= param->table->file->ha_rnd_pos_call_time(records);
}
else
{
/*
- Rows will be retreived with rnd_pos(). Caluclate the expected
+ Rows will be retrieved with rnd_pos(). Calculate the expected
cost for this.
*/
double n_blocks=
@@ -5041,9 +5195,11 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
*/
result= busy_blocks;
}
+ result+= rows2double(n_rows) * param->table->file->ROW_COPY_COST;
}
DBUG_PRINT("return",("cost: %g", result));
DBUG_RETURN(result);
+#endif /* OLD_SWEEP_COST */
}
@@ -5114,7 +5270,9 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
static
TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
- double read_time, bool named_trace)
+ double read_time, ha_rows limit,
+ bool named_trace,
+ bool using_table_scan)
{
SEL_TREE **ptree;
TRP_INDEX_MERGE *imerge_trp= NULL;
@@ -5178,7 +5336,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
"tree in SEL_IMERGE"););
Json_writer_object trace_idx(thd);
if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, FALSE,
- read_time)))
+ read_time, limit, using_table_scan)))
{
/*
One of index scans in this index_merge is more expensive than entire
@@ -5197,16 +5355,17 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_cost += (*cur_child)->read_cost;
all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
all_scans_rors &= (*cur_child)->is_ror;
- if (param->table->file->is_clustering_key(param->real_keynr[(*cur_child)->key_idx]))
+ if (param->table->file->is_clustering_key(keynr_in_table))
{
cpk_scan= cur_child;
cpk_scan_records= (*cur_child)->records;
}
else
non_cpk_scan_records += (*cur_child)->records;
- trace_idx.add("index_to_merge",
- param->table->key_info[keynr_in_table].name)
- .add("cumulated_cost", imerge_cost);
+ if (unlikely(trace_idx.trace_started()))
+ trace_idx.
+ add("index_to_merge", param->table->key_info[keynr_in_table].name).
+ add("cumulated_cost", imerge_cost);
}
to_merge.end();
@@ -5238,9 +5397,10 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
optimizer_flag(param->thd, OPTIMIZER_SWITCH_INDEX_MERGE_UNION))
{
roru_read_plans= (TABLE_READ_PLAN**)range_scans;
- trace_best_disjunct.add("use_roworder_union", true)
- .add("cause",
- "always cheaper than non roworder retrieval");
+ if (unlikely(trace_best_disjunct.trace_started()))
+ trace_best_disjunct.
+ add("use_roworder_union", true).
+ add("cause", "always cheaper than non roworder retrieval");
goto skip_to_ror_scan;
}
@@ -5250,8 +5410,8 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
Add one ROWID comparison for each row retrieved on non-CPK scan. (it
is done in QUICK_RANGE_SELECT::row_in_ranges)
*/
- double rid_comp_cost= (rows2double(non_cpk_scan_records) /
- TIME_FOR_COMPARE_ROWID);
+ double rid_comp_cost= (rows2double(non_cpk_scan_records) *
+ default_optimizer_costs.rowid_cmp_cost);
imerge_cost+= rid_comp_cost;
trace_best_disjunct.add("cost_of_mapping_rowid_in_non_clustered_pk_scan",
rid_comp_cost);
@@ -5259,18 +5419,24 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
/* Calculate cost(rowid_to_row_scan) */
{
- double sweep_cost= get_sweep_read_cost(param, non_cpk_scan_records);
+ /* imerge_cost already includes WHERE_COST */
+ double sweep_cost= get_sweep_read_cost(param, rows2double(non_cpk_scan_records), 0);
imerge_cost+= sweep_cost;
- trace_best_disjunct.add("cost_sort_rowid_and_read_disk", sweep_cost);
+ trace_best_disjunct.
+ add("rows", non_cpk_scan_records).
+ add("cost_sort_rowid_and_read_disk", sweep_cost).
+ add("cost", imerge_cost);
}
DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g",
imerge_cost));
if (imerge_cost > read_time ||
!optimizer_flag(param->thd, OPTIMIZER_SWITCH_INDEX_MERGE_SORT_UNION))
{
- trace_best_disjunct.add("use_roworder_index_merge", true);
- trace_best_disjunct.add("cause", "cost");
- goto build_ror_index_merge;
+ if (unlikely(trace_best_disjunct.trace_started()))
+ trace_best_disjunct.
+ add("use_sort_index_merge", false).
+ add("cause", imerge_cost > read_time ? "cost" : "disabled");
+ goto build_ror_index_merge; // Try roworder_index_merge
}
/* Add Unique operations cost */
@@ -5287,15 +5453,17 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
}
{
- const double dup_removal_cost= Unique::get_use_cost(
+ const double dup_removal_cost= Unique::get_use_cost(thd,
param->imerge_cost_buff, (uint)non_cpk_scan_records,
param->table->file->ref_length,
(size_t)param->thd->variables.sortbuff_size,
- TIME_FOR_COMPARE_ROWID,
+ ROWID_COMPARE_COST_THD(param->thd),
FALSE, NULL);
imerge_cost+= dup_removal_cost;
- trace_best_disjunct.add("cost_duplicate_removal", dup_removal_cost)
- .add("total_cost", imerge_cost);
+ if (unlikely(trace_best_disjunct.trace_started()))
+ trace_best_disjunct.
+ add("cost_duplicate_removal", dup_removal_cost).
+ add("total_cost", imerge_cost);
}
DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)",
@@ -5359,31 +5527,28 @@ skip_to_ror_scan:
double cost;
if ((*cur_child)->is_ror)
{
+ handler *file= param->table->file;
/* Ok, we have index_only cost, now get full rows scan cost */
- cost= param->table->file->
- read_time(param->real_keynr[(*cur_child)->key_idx], 1,
- (*cur_child)->records) +
- rows2double((*cur_child)->records) / TIME_FOR_COMPARE;
+ cost= file->cost(file->ha_rnd_pos_call_and_compare_time((*cur_child)->records));
}
else
cost= read_time;
TABLE_READ_PLAN *prev_plan= *cur_child;
- if (!(*cur_roru_plan= get_best_ror_intersect(param, *ptree, cost,
- &dummy)))
+ TRP_ROR_INTERSECT *ror_trp;
+ if (!(*cur_roru_plan= ror_trp= get_best_ror_intersect(param, *ptree, cost,
+ &dummy)))
{
- if (prev_plan->is_ror)
- *cur_roru_plan= prev_plan;
- else
+ if (!prev_plan->is_ror)
DBUG_RETURN(imerge_trp);
+ *cur_roru_plan= prev_plan;
roru_index_costs += (*cur_roru_plan)->read_cost;
}
else
- roru_index_costs +=
- ((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs;
+ roru_index_costs += ror_trp->index_scan_costs;
roru_total_records += (*cur_roru_plan)->records;
- roru_intersect_part *= (*cur_roru_plan)->records /
- param->table->stat_records();
+ roru_intersect_part *= ((*cur_roru_plan)->records /
+ param->table->stat_records());
}
trace_analyze_ror.end();
/*
@@ -5404,15 +5569,17 @@ skip_to_ror_scan:
*/
double roru_total_cost;
- roru_total_cost= roru_index_costs +
- rows2double(roru_total_records)*log((double)n_child_scans) /
- (TIME_FOR_COMPARE_ROWID * M_LN2) +
- get_sweep_read_cost(param, roru_total_records);
+ roru_total_cost= (roru_index_costs +
+ rows2double(roru_total_records)*log((double)n_child_scans) *
+ ROWID_COMPARE_COST_THD(param->thd) / M_LN2 +
+ get_sweep_read_cost(param, rows2double(roru_total_records), 0));
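The union cost above is the index costs plus a priority-queue term of roughly log2(n_child_scans) rowid comparisons per row plus the sweep; a sketch with invented numbers (rowid_compare_cost and sweep_cost are stand-ins):

  #include <cmath>
  #include <cstdio>

  int main()
  {
    double roru_index_costs= 60.0;
    double roru_total_records= 20000;
    double n_child_scans= 4;
    double rowid_compare_cost= 2e-5;   // stand-in for ROWID_COMPARE_COST_THD()
    double sweep_cost= 45.0;           // stand-in for get_sweep_read_cost()

    double roru_total_cost= roru_index_costs +
                            roru_total_records * std::log2(n_child_scans) *
                            rowid_compare_cost +
                            sweep_cost;
    printf("ROR-union cost: %g\n", roru_total_cost);   // 60 + 0.8 + 45
    return 0;
  }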
DBUG_PRINT("info", ("ROR-union: cost %g, %zu members",
roru_total_cost, n_child_scans));
- trace_best_disjunct.add("index_roworder_union_cost", roru_total_cost)
- .add("members", n_child_scans);
+ if (unlikely(trace_best_disjunct.trace_started()))
+ trace_best_disjunct.
+ add("index_roworder_union_cost", roru_total_cost).
+ add("members", n_child_scans);
TRP_ROR_UNION* roru;
if (roru_total_cost < read_time)
{
@@ -5438,7 +5605,7 @@ skip_to_ror_scan:
SYNOPSIS
merge_same_index_scans()
param Context info for the operation
- imerge IN/OUT SEL_IMERGE from which imerge_trp has been extracted
+ imerge IN/OUT SEL_IMERGE from which imerge_trp has been extracted
imerge_trp The index merge plan where index scans for the same
indexes are to be merges
read_time The upper bound for the cost of the plan to be evaluated
@@ -5519,7 +5686,8 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
DBUG_ASSERT(imerge->trees_next>imerge->trees);
if (imerge->trees_next-imerge->trees > 1)
- trp= get_best_disjunct_quick(param, imerge, read_time, true);
+ trp= get_best_disjunct_quick(param, imerge, read_time, HA_POS_ERROR, true,
+ 0);
else
{
/*
@@ -5560,8 +5728,8 @@ typedef struct st_common_index_intersect_info
{
PARAM *param; /* context info for range optimizations */
uint key_size; /* size of a ROWID element stored in Unique object */
- double compare_factor; /* 1/compare - cost to compare two ROWIDs */
- size_t max_memory_size; /* maximum space allowed for Unique objects */
+ double compare_factor; /* cost to compare two ROWIDs */
+ size_t max_memory_size; /* maximum space allowed for Unique objects */
ha_rows table_cardinality; /* estimate of the number of records in table */
double cutoff_cost; /* discard index intersects with greater costs */
INDEX_SCAN_INFO *cpk_scan; /* clustered primary key used in intersection */
@@ -5661,8 +5829,7 @@ bool create_fields_bitmap(PARAM *param, MY_BITMAP *fields_bitmap)
static
int cmp_intersect_index_scan(INDEX_SCAN_INFO **a, INDEX_SCAN_INFO **b)
{
- return (*a)->records < (*b)->records ?
- -1 : (*a)->records == (*b)->records ? 0 : 1;
+ return CMP_NUM((*a)->records, (*b)->records);
}
@@ -5775,7 +5942,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
common->param= param;
common->key_size= table->file->ref_length;
- common->compare_factor= TIME_FOR_COMPARE_ROWID;
+ common->compare_factor= ROWID_COMPARE_COST_THD(param->thd);
common->max_memory_size= (size_t)param->thd->variables.sortbuff_size;
common->cutoff_cost= cutoff_cost;
common->cpk_scan= NULL;
@@ -5816,26 +5983,30 @@ bool prepare_search_best_index_intersect(PARAM *param,
if (*index_scan == cpk_scan)
{
- idx_scan.add("chosen", "false")
- .add("cause", "clustered index used for filtering");
+ if (unlikely(idx_scan.trace_started()))
+ idx_scan.
+ add("chosen", "false").
+ add("cause", "clustered index used for filtering");
continue;
}
if (cpk_scan && cpk_scan->used_key_parts >= used_key_parts &&
same_index_prefix(cpk_scan->key_info, key_info, used_key_parts))
{
- idx_scan.add("chosen", "false")
- .add("cause", "clustered index used for filtering");
+ if (unlikely(idx_scan.trace_started()))
+ idx_scan.
+ add("chosen", "false").
+ add("cause", "clustered index used for filtering");
continue;
}
- cost= table->opt_range[(*index_scan)->keynr].index_only_cost;
+ cost= table->opt_range[(*index_scan)->keynr].index_only_fetch_cost(table);
idx_scan.add("cost", cost);
- if (cost >= cutoff_cost)
+ if (cost + COST_EPS >= cutoff_cost)
{
- idx_scan.add("chosen", false);
- idx_scan.add("cause", "cost");
+ if (unlikely(idx_scan.trace_started()))
+ idx_scan.add("chosen", false).add("cause", "cost");
continue;
}
@@ -5854,15 +6025,18 @@ bool prepare_search_best_index_intersect(PARAM *param,
}
if (!*scan_ptr || cost < (*scan_ptr)->index_read_cost)
{
- idx_scan.add("chosen", true);
- if (!*scan_ptr)
- idx_scan.add("cause", "first occurrence of index prefix");
- else
- idx_scan.add("cause", "better cost for same idx prefix");
+ if (unlikely(idx_scan.trace_started()))
+ {
+ idx_scan.add("chosen", true);
+ if (!*scan_ptr)
+ idx_scan.add("cause", "first occurrence of index prefix");
+ else
+ idx_scan.add("cause", "better cost for same idx prefix");
+ }
*scan_ptr= *index_scan;
(*scan_ptr)->index_read_cost= cost;
}
- else
+ else if (unlikely(idx_scan.trace_started()))
{
idx_scan.add("chosen", false).add("cause", "cost");
}
@@ -5887,7 +6061,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
return TRUE;
common->best_uses_cpk= FALSE;
- common->best_cost= cutoff_cost + COST_EPS;
+ common->best_cost= cutoff_cost;
common->best_length= 0;
if (!(common->best_intersect=
@@ -5925,13 +6099,14 @@ bool prepare_search_best_index_intersect(PARAM *param,
ha_rows records= records_in_index_intersect_extension(&curr, *scan_ptr);
(*scan_ptr)->filtered_out= records >= scan_records ?
0 : scan_records-records;
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
{
Json_writer_object selected_idx(thd);
selected_idx.add("index", key_info->name);
print_keyparts(thd, key_info, (*scan_ptr)->used_key_parts);
- selected_idx.add("records", (*scan_ptr)->records)
- .add("filtered_records", (*scan_ptr)->filtered_out);
+ selected_idx.
+ add("rows", (*scan_ptr)->records).
+ add("filtered_records", (*scan_ptr)->filtered_out);
}
}
}
@@ -5941,13 +6116,14 @@ bool prepare_search_best_index_intersect(PARAM *param,
{
KEY *key_info= (*scan_ptr)->key_info;
(*scan_ptr)->filtered_out= 0;
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
{
Json_writer_object selected_idx(thd);
selected_idx.add("index", key_info->name);
print_keyparts(thd, key_info, (*scan_ptr)->used_key_parts);
- selected_idx.add("records", (*scan_ptr)->records)
- .add("filtered_records", (*scan_ptr)->filtered_out);
+ selected_idx.
+ add("rows", (*scan_ptr)->records).
+ add("filtered_records", (*scan_ptr)->filtered_out);
}
}
}
@@ -6187,8 +6363,8 @@ double get_cpk_filter_cost(ha_rows filtered_records,
INDEX_SCAN_INFO *cpk_scan,
double compare_factor)
{
- return log((double) (cpk_scan->range_count+1)) / (compare_factor * M_LN2) *
- filtered_records;
+ return (log((double) (cpk_scan->range_count+1)) * compare_factor / M_LN2 *
+ filtered_records);
}
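The rewritten expression keeps the same formula, with compare_factor now a per-comparison cost that is multiplied in rather than divided by; roughly cost = filtered_records * log2(range_count + 1) * rowid_compare_cost. A sketch with invented numbers:

  #include <cmath>
  #include <cstdio>

  int main()
  {
    // Illustrative values: 1000 rows checked against 7 clustered-PK ranges.
    double filtered_records= 1000, range_count= 7;
    double rowid_compare_cost= 2e-5;   // stand-in for the compare_factor argument
    double cost= std::log2(range_count + 1) * rowid_compare_cost *
                 filtered_records;     // 3 comparisons per row on average
    printf("cpk filter cost: %g\n", cost);
    return 0;
  }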
@@ -6212,7 +6388,8 @@ double get_cpk_filter_cost(ha_rows filtered_records,
*/
static
-bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
+bool check_index_intersect_extension(THD *thd,
+ PARTIAL_INDEX_INTERSECT_INFO *curr,
INDEX_SCAN_INFO *ext_index_scan,
PARTIAL_INDEX_INTERSECT_INFO *next)
{
@@ -6224,9 +6401,19 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
COMMON_INDEX_INTERSECT_INFO *common_info= curr->common_info;
double cutoff_cost= common_info->cutoff_cost;
uint idx= curr->length;
+ Json_writer_object trace(thd, "check_index_intersect_extension");
+
next->index_read_cost= curr->index_read_cost+ext_index_scan->index_read_cost;
if (next->index_read_cost > cutoff_cost)
- return FALSE;
+ {
+ if (unlikely(trace.trace_started()))
+ trace.
+ add("index", ext_index_scan->key_info->name.str).
+ add("cost", next->index_read_cost).
+ add("chosen", false).
+ add("cause", "cost");
+ return FALSE;
+ }
if ((next->in_memory= curr->in_memory))
next->in_memory_cost= curr->in_memory_cost;
@@ -6250,16 +6437,18 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
common_info->compare_factor)*
ext_index_scan_records;
cost= next->in_memory_cost;
+
}
else
{
uint *buff_elems= common_info->buff_elems;
uint key_size= common_info->key_size;
double compare_factor= common_info->compare_factor;
- size_t max_memory_size= common_info->max_memory_size;
-
+ size_t max_memory_size= common_info->max_memory_size;
+
records_sent_to_unique+= ext_index_scan_records;
- cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size,
+ cost= Unique::get_use_cost(thd, buff_elems, (size_t) records_sent_to_unique,
+ key_size,
max_memory_size, compare_factor, TRUE,
&next->in_memory);
if (records_filtered_out_by_cpk)
@@ -6269,7 +6458,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
double cost2;
bool in_memory2;
ha_rows records2= records_sent_to_unique-records_filtered_out_by_cpk;
- cost2= Unique::get_use_cost(buff_elems, (size_t) records2, key_size,
+ cost2= Unique::get_use_cost(thd, buff_elems, (size_t) records2, key_size,
max_memory_size, compare_factor, TRUE,
&in_memory2);
cost2+= get_cpk_filter_cost(ext_index_scan_records, common_info->cpk_scan,
@@ -6286,6 +6475,19 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
if (next->in_memory)
next->in_memory_cost= cost;
}
+ if (unlikely(trace.trace_started()))
+ {
+ trace.
+ add("index", ext_index_scan->key_info->name.str).
+ add("in_memory", next->in_memory).
+ add("range_rows", ext_index_scan_records).
+ add("rows_sent_to_unique", records_sent_to_unique).
+ add("unique_cost", cost).
+ add("index_read_cost", next->index_read_cost);
+ if (next->use_cpk_filter)
+ trace.add("rows_filtered_out_by_clustered_pk", records_filtered_out_by_cpk);
+ }
+
if (next->use_cpk_filter)
{
@@ -6297,20 +6499,37 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
records= records_in_index_intersect_extension(curr, ext_index_scan);
if (idx && records > curr->records)
+ {
+ if (unlikely(trace.trace_started()))
+ trace.
+ add("rows", records).
+ add("chosen", false).
+ add("cause", "too many rows");
return FALSE;
+ }
if (next->use_cpk_filter && curr->filtered_scans.is_clear_all())
records-= records_filtered_out_by_cpk;
next->records= records;
cost+= next->index_read_cost;
if (cost >= cutoff_cost)
+ {
+ if (unlikely(trace.trace_started()))
+ trace.add("cost", cost).add("chosen", false).add("cause", "cost");
return FALSE;
+ }
- cost+= get_sweep_read_cost(common_info->param, records);
+ /*
+ The cost after the sweep can be bigger than the cutoff, but that is ok
+ as the final cost can decrease when we add the next index.
+ */
+ cost+= get_sweep_read_cost(common_info->param, rows2double(records), 1);
next->cost= cost;
next->length= curr->length+1;
+ if (unlikely(trace.trace_started()))
+ trace.add("rows", records).add("cost", cost).add("chosen", true);
return TRUE;
}
@@ -6329,7 +6548,8 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
*/
static
-void find_index_intersect_best_extension(PARTIAL_INDEX_INTERSECT_INFO *curr)
+void find_index_intersect_best_extension(THD *thd,
+ PARTIAL_INDEX_INTERSECT_INFO *curr)
{
PARTIAL_INDEX_INTERSECT_INFO next;
COMMON_INDEX_INTERSECT_INFO *common_info= curr->common_info;
@@ -6356,14 +6576,18 @@ void find_index_intersect_best_extension(PARTIAL_INDEX_INTERSECT_INFO *curr)
next.common_info= common_info;
+ Json_writer_array potential_index_intersect(thd, "potential_index_intersect");
+
INDEX_SCAN_INFO *rem_first_index_scan= *rem_first_index_scan_ptr;
for (INDEX_SCAN_INFO **index_scan_ptr= rem_first_index_scan_ptr;
*index_scan_ptr; index_scan_ptr++)
{
+ Json_writer_object selected(thd);
*rem_first_index_scan_ptr= *index_scan_ptr;
*index_scan_ptr= rem_first_index_scan;
- if (check_index_intersect_extension(curr, *rem_first_index_scan_ptr, &next))
- find_index_intersect_best_extension(&next);
+ if (check_index_intersect_extension(thd, curr, *rem_first_index_scan_ptr,
+ &next))
+ find_index_intersect_best_extension(thd, &next);
*index_scan_ptr= *rem_first_index_scan_ptr;
*rem_first_index_scan_ptr= rem_first_index_scan;
}
@@ -6407,16 +6631,18 @@ TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
TRP_INDEX_INTERSECT *intersect_trp= NULL;
TABLE *table= param->table;
THD *thd= param->thd;
-
DBUG_ENTER("get_best_index_intersect");
Json_writer_object trace_idx_interect(thd, "analyzing_sort_intersect");
+ if (unlikely(trace_idx_interect.trace_started()))
+ trace_idx_interect.add("cutoff_cost", read_time);
+
if (prepare_search_best_index_intersect(param, tree, &common, &init,
read_time))
DBUG_RETURN(NULL);
- find_index_intersect_best_extension(&init);
+ find_index_intersect_best_extension(thd, &init);
if (common.best_length <= 1 && !common.best_uses_cpk)
DBUG_RETURN(NULL);
@@ -6475,13 +6701,15 @@ TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
{
intersect_trp->read_cost= common.best_cost;
- intersect_trp->records= common.best_records;
+ intersect_trp->records= common.best_records;
intersect_trp->range_scans= range_scans;
intersect_trp->range_scans_end= cur_range;
intersect_trp->filtered_scans= common.filtered_scans;
- trace_idx_interect.add("rows", intersect_trp->records)
- .add("cost", intersect_trp->read_cost)
- .add("chosen",true);
+ if (unlikely(trace_idx_interect.trace_started()))
+ trace_idx_interect.
+ add("rows", intersect_trp->records).
+ add("cost", intersect_trp->read_cost).
+ add("chosen",true);
}
DBUG_RETURN(intersect_trp);
}
@@ -6497,11 +6725,12 @@ void TRP_ROR_INTERSECT::trace_basic_info(PARAM *param,
THD *thd= param->thd;
DBUG_ASSERT(trace_object->trace_started());
- trace_object->add("type", "index_roworder_intersect");
- trace_object->add("rows", records);
- trace_object->add("cost", read_cost);
- trace_object->add("covering", is_covering);
- trace_object->add("clustered_pk_scan", cpk_scan != NULL);
+ trace_object->
+ add("type", "index_roworder_intersect").
+ add("rows", records).
+ add("cost", read_cost).
+ add("covering", is_covering).
+ add("clustered_pk_scan", cpk_scan != NULL);
Json_writer_array smth_trace(thd, "intersect_of");
for (ROR_SCAN_INFO **cur_scan= first_scan; cur_scan != last_scan;
@@ -6511,9 +6740,10 @@ void TRP_ROR_INTERSECT::trace_basic_info(PARAM *param,
const KEY_PART_INFO *key_part= cur_key.key_part;
Json_writer_object trace_isect_idx(thd);
- trace_isect_idx.add("type", "range_scan");
- trace_isect_idx.add("index", cur_key.name);
- trace_isect_idx.add("rows", (*cur_scan)->records);
+ trace_isect_idx.
+ add("type", "range_scan").
+ add("index", cur_key.name).
+ add("rows", (*cur_scan)->records);
Json_writer_array trace_range(thd, "ranges");
@@ -6544,6 +6774,7 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
ROR_SCAN_INFO *ror_scan;
my_bitmap_map *bitmap_buf;
uint keynr;
+ handler *file= param->table->file;
DBUG_ENTER("make_ror_scan");
if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root,
@@ -6553,7 +6784,7 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
ror_scan->idx= idx;
ror_scan->keynr= keynr= param->real_keynr[idx];
ror_scan->key_rec_length= (param->table->key_info[keynr].key_length +
- param->table->file->ref_length);
+ file->ref_length);
ror_scan->sel_arg= sel_arg;
ror_scan->records= param->quick_rows[keynr];
@@ -6574,8 +6805,14 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
}
+
+ /*
+ Cost of reading the keys for the rows, which are later stored in the
+ ror queue.
+ */
ror_scan->index_read_cost=
- param->table->file->keyread_time(ror_scan->keynr, 1, ror_scan->records);
+ file->cost(file->ha_keyread_and_copy_time(ror_scan->keynr, 1,
+ ror_scan->records, 0));
DBUG_RETURN(ror_scan);
}
@@ -6885,7 +7122,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
avoid duplicating the inference code)
NOTES
- Adding a ROR scan to ROR-intersect "makes sense" iff the cost of ROR-
+ Adding a ROR scan to ROR-intersect "makes sense" if the cost of ROR-
intersection decreases. The cost of ROR-intersection is calculated as
follows:
@@ -6933,11 +7170,11 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
{
/*
CPK scan is used to filter out rows. We apply filtering for
- each record of every scan. Assuming 1/TIME_FOR_COMPARE_ROWID
+ each record of every scan. Assuming ROWID_COMPARE_COST
per check this gives us:
*/
- const double idx_cost= rows2double(info->index_records) /
- TIME_FOR_COMPARE_ROWID;
+ const double idx_cost= (rows2double(info->index_records) *
+ ROWID_COMPARE_COST_THD(info->param->thd));
info->index_scan_costs+= idx_cost;
trace_costs->add("index_scan_cost", idx_cost);
}
@@ -6960,14 +7197,15 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
DBUG_PRINT("info", ("info->total_cost: %g", info->total_cost));
if (!info->is_covering)
{
- double sweep_cost= get_sweep_read_cost(info->param,
- double2rows(info->out_rows));
+ double sweep_cost= get_sweep_read_cost(info->param, info->out_rows, 1);
info->total_cost+= sweep_cost;
trace_costs->add("disk_sweep_cost", sweep_cost);
DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
}
else
- trace_costs->add("disk_sweep_cost", 0);
+ {
+ trace_costs->add("disk_sweep_cost", static_cast<longlong>(0));
+ }
DBUG_PRINT("info", ("New out_rows: %g", info->out_rows));
DBUG_PRINT("info", ("New cost: %g, %scovering", info->total_cost,
@@ -7046,9 +7284,13 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
bool *are_all_covering)
{
uint idx;
- double min_cost= DBL_MAX;
- DBUG_ENTER("get_best_ror_intersect");
+ double min_cost= DBL_MAX, cmp_cost;
THD *thd= param->thd;
+ DBUG_ENTER("get_best_ror_intersect");
+ DBUG_PRINT("enter", ("opt_range_condition_rows: %llu cond_selectivity: %g",
+ (ulonglong) param->table->opt_range_condition_rows,
+ param->table->cond_selectivity));
+
Json_writer_object trace_ror(thd, "analyzing_roworder_intersect");
if ((tree->n_ror_scans < 2) || !param->table->stat_records() ||
@@ -7082,8 +7324,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
if (!tree->ror_scans_map.is_set(idx))
continue;
key_no= param->real_keynr[idx];
- if (key_no != cpk_no &&
- param->table->file->index_flags(key_no,0,0) & HA_CLUSTERED_INDEX)
+ if (key_no != cpk_no && param->table->file->is_clustering_key(key_no))
{
/* Ignore clustering keys */
tree->n_ror_scans--;
@@ -7143,31 +7384,40 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
/* S= S + first(R); R= R - first(R); */
if (!ror_intersect_add(intersect, *cur_ror_scan, &trace_idx, FALSE))
{
- trace_idx.add("usable", false)
- .add("cause", "does not reduce cost of intersect");
+ trace_idx.
+ add("usable", false).
+ add("cause", "does not reduce cost of intersect");
cur_ror_scan++;
continue;
}
- trace_idx.add("cumulative_total_cost", intersect->total_cost)
- .add("usable", true)
- .add("matching_rows_now", intersect->out_rows)
- .add("intersect_covering_with_this_index", intersect->is_covering);
+ trace_idx.
+ add("cumulative_total_cost", intersect->total_cost).
+ add("usable", true).
+ add("matching_rows_now", intersect->out_rows).
+ add("intersect_covering_with_this_index", intersect->is_covering);
*(intersect_scans_end++)= *(cur_ror_scan++);
+ /*
+ Check if intersect gives a lower cost.
+ The first ror scan is always accepted.
+ The next ror scan is only accepted if the total cost went down
+ (enough rows were rejected to offset the intersect cost)
+ */
if (intersect->total_cost < min_cost)
{
/* Local minimum found, save it */
+ min_cost= intersect->total_cost;
ror_intersect_cpy(intersect_best, intersect);
intersect_scans_best= intersect_scans_end;
- min_cost = intersect->total_cost;
trace_idx.add("chosen", true);
}
else
{
- trace_idx.add("chosen", false)
- .add("cause", "does not reduce cost");
+ trace_idx.
+ add("chosen", false).
+ add("cause", "does not reduce cost");
}
}
trace_isect_idx.end();
@@ -7175,8 +7425,9 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
if (intersect_scans_best == intersect_scans)
{
DBUG_PRINT("info", ("None of scans increase selectivity"));
- trace_ror.add("chosen", false)
- .add("cause","does not increase selectivity");
+ trace_ror.
+ add("chosen", false).
+ add("cause","does not increase selectivity");
DBUG_RETURN(NULL);
}
@@ -7200,32 +7451,46 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
if (ror_intersect_add(intersect, cpk_scan, &trace_cpk, TRUE) &&
(intersect->total_cost < min_cost))
{
- trace_cpk.add("clustered_pk_scan_added_to_intersect", true)
- .add("cumulated_cost", intersect->total_cost);
+ min_cost= intersect->total_cost;
+ if (trace_cpk.trace_started())
+ trace_cpk.
+ add("clustered_pk_scan_added_to_intersect", true).
+ add("cumulated_cost", intersect->total_cost);
intersect_best= intersect; //just set pointer here
}
else
{
- trace_cpk.add("clustered_pk_added_to_intersect", false)
- .add("cause", "cost");
+ if (trace_cpk.trace_started())
+ trace_cpk.
+ add("clustered_pk_added_to_intersect", false).
+ add("cause", "cost");
cpk_scan= 0; // Don't use cpk_scan
}
}
else
{
- trace_cpk.add("clustered_pk_added_to_intersect", false)
- .add("cause", cpk_scan ? "roworder is covering"
- : "no clustered pk index");
+ trace_cpk.
+ add("clustered_pk_added_to_intersect", false).
+ add("cause", cpk_scan ? "roworder is covering"
+ : "no clustered pk index");
cpk_scan= 0; // Don't use cpk_scan
}
trace_cpk.end();
+ /*
+ Adjust the row count and add the cost of checking the final rows
+ against the WHERE clause.
+ */
+ cmp_cost= intersect_best->out_rows * thd->variables.optimizer_where_cost;
+
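The decision just below compares min_cost plus this WHERE-checking cost against read_time; a compact sketch with invented numbers (optimizer_where_cost here is a stand-in for the session variable):

  #include <cstdio>

  int main()
  {
    double min_cost= 80.0;                 // best ROR-intersect cost so far
    double out_rows= 4000;                 // rows the intersect would return
    double optimizer_where_cost= 3.2e-2;   // stand-in for the session variable
    double read_time= 250.0;               // cost of the best earlier plan

    double cmp_cost= out_rows * optimizer_where_cost;
    bool use_ror_intersect= (min_cost + cmp_cost < read_time);
    printf("use ROR-intersect: %s\n", use_ror_intersect ? "yes" : "no");
    return 0;
  }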
/* Ok, return ROR-intersect plan if we have found one */
TRP_ROR_INTERSECT *trp= NULL;
- if (min_cost < read_time && (cpk_scan || best_num > 1))
+ if (min_cost + cmp_cost < read_time && (cpk_scan || best_num > 1))
{
+ double best_rows= intersect_best->out_rows;
+ set_if_bigger(best_rows, 1);
if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT))
- DBUG_RETURN(trp);
+ DBUG_RETURN(NULL);
if (!(trp->first_scan=
(ROR_SCAN_INFO**)alloc_root(param->mem_root,
sizeof(ROR_SCAN_INFO*)*best_num)))
@@ -7233,30 +7498,30 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
memcpy(trp->first_scan, intersect_scans, best_num*sizeof(ROR_SCAN_INFO*));
trp->last_scan= trp->first_scan + best_num;
trp->is_covering= intersect_best->is_covering;
- trp->read_cost= intersect_best->total_cost;
- /* Prevent divisons by zero */
- ha_rows best_rows = double2rows(intersect_best->out_rows);
- if (!best_rows)
- best_rows= 1;
- set_if_smaller(param->table->opt_range_condition_rows, best_rows);
- trp->records= best_rows;
+ trp->read_cost= min_cost + cmp_cost;
+ param->table->set_opt_range_condition_rows((ha_rows)best_rows);
+ trp->records= (ha_rows)best_rows;
trp->index_scan_costs= intersect_best->index_scan_costs;
trp->cpk_scan= cpk_scan;
DBUG_PRINT("info", ("Returning non-covering ROR-intersect plan:"
"cost %g, records %lu",
trp->read_cost, (ulong) trp->records));
- trace_ror.add("rows", trp->records)
- .add("cost", trp->read_cost)
- .add("covering", trp->is_covering)
- .add("chosen", true);
+ if (unlikely(trace_ror.trace_started()))
+ trace_ror.
+ add("rows", trp->records).
+ add("cost", trp->read_cost).
+ add("covering", trp->is_covering).
+ add("chosen", true);
}
else
{
- trace_ror.add("chosen", false)
- .add("cause", (read_time > min_cost)
- ? "too few indexes to merge"
- : "cost");
+ trace_ror.
+ add("chosen", false).
+ add("cause", (min_cost + cmp_cost >= read_time) ?
+ "cost" : "too few indexes to merge");
}
+ DBUG_PRINT("exit", ("opt_range_condition_rows: %llu",
+ (ulonglong) param->table->opt_range_condition_rows));
DBUG_RETURN(trp);
}
@@ -7328,7 +7593,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
DBUG_RETURN(0);
bitmap_clear_all(covered_fields);
- double total_cost= 0.0f;
+ double total_cost= 0.0;
ha_rows records=0;
bool all_covered;
@@ -7385,11 +7650,13 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
tree->ror_scans, ror_scan_mark););
/* Add priority queue use cost. */
- total_cost += rows2double(records)*
- log((double)(ror_scan_mark - tree->ror_scans)) /
- (TIME_FOR_COMPARE_ROWID * M_LN2);
+ total_cost += (rows2double(records) *
+ log((double)(ror_scan_mark - tree->ror_scans)) *
+ ROWID_COMPARE_COST_THD(param->thd) / M_LN2);
DBUG_PRINT("info", ("Covering ROR-intersect full cost: %g", total_cost));
+ /* TODO: Add TIME_FOR_COMPARE cost to total_cost */
+
if (total_cost > read_time)
DBUG_RETURN(NULL);
@@ -7407,9 +7674,9 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
trp->read_cost= total_cost;
trp->records= records;
trp->cpk_scan= NULL;
- set_if_smaller(param->table->opt_range_condition_rows, records);
+ param->table->set_opt_range_condition_rows(records);
- DBUG_PRINT("info",
+ DBUG_PRINT("exit",
("Returning covering ROR-intersect plan: cost %g, records %lu",
trp->read_cost, (ulong) trp->records));
DBUG_RETURN(trp);
@@ -7436,7 +7703,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool index_read_must_be_used,
bool for_range_access,
- double read_time)
+ double read_time, ha_rows limit,
+ bool using_table_scan)
{
uint idx, UNINIT_VAR(best_idx);
SEL_ARG *key_to_read= NULL;
@@ -7488,23 +7756,23 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
Json_writer_object trace_idx(thd);
trace_idx.add("index", param->table->key_info[keynr].name);
- found_records= check_quick_select(param, idx, read_index_only, key,
- for_range_access, &mrr_flags,
+ found_records= check_quick_select(param, idx, limit, read_index_only,
+ key, for_range_access, &mrr_flags,
&buf_size, &cost, &is_ror_scan);
- if (!for_range_access && !is_ror_scan &&
- !optimizer_flag(param->thd,OPTIMIZER_SWITCH_INDEX_MERGE_SORT_UNION))
+ if (found_records == HA_POS_ERROR ||
+ (!for_range_access && !is_ror_scan &&
+ !optimizer_flag(param->thd,OPTIMIZER_SWITCH_INDEX_MERGE_SORT_UNION)))
{
/* The scan is not a ROR-scan, just skip it */
continue;
}
-
- if (found_records != HA_POS_ERROR && tree->index_scans &&
+ found_read_time= cost.total_cost();
+ if (tree->index_scans &&
(index_scan= (INDEX_SCAN_INFO *)alloc_root(param->mem_root,
sizeof(INDEX_SCAN_INFO))))
{
Json_writer_array trace_range(thd, "ranges");
-
const KEY &cur_key= param->table->key_info[keynr];
const KEY_PART_INFO *key_part= cur_key.key_part;
@@ -7521,19 +7789,30 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
trace_ranges(&trace_range, param, idx, key, key_part);
trace_range.end();
- trace_idx.add("rowid_ordered", is_ror_scan)
- .add("using_mrr", !(mrr_flags & HA_MRR_USE_DEFAULT_IMPL))
- .add("index_only", read_index_only)
- .add("rows", found_records)
- .add("cost", cost.total_cost());
+ if (unlikely(trace_idx.trace_started()))
+ {
+ trace_idx.
+ add("rowid_ordered", is_ror_scan).
+ add("using_mrr", !(mrr_flags & HA_MRR_USE_DEFAULT_IMPL)).
+ add("index_only", read_index_only).
+ add("rows", found_records).
+ add("cost", found_read_time);
+ if (using_table_scan && cost.limit_cost != 0.0)
+ trace_idx.add("cost_with_limit", cost.limit_cost);
+ }
}
- if ((found_records != HA_POS_ERROR) && is_ror_scan)
+ if (is_ror_scan)
{
tree->n_ror_scans++;
tree->ror_scans_map.set_bit(idx);
}
- if (found_records != HA_POS_ERROR &&
- read_time > (found_read_time= cost.total_cost()))
+ /*
+ Use this range if it is the best range so far, or if we are comparing
+ against a table scan and the cost with the limit approximation is
+ better than the table scan.
+ */
+ if (read_time > found_read_time ||
+ (using_table_scan && cost.limit_cost != 0.0 &&
+ read_time > cost.limit_cost))
{
read_time= found_read_time;
best_records= found_records;
@@ -7541,9 +7820,10 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
best_idx= idx;
best_mrr_flags= mrr_flags;
best_buf_size= buf_size;
+ using_table_scan= 0;
trace_idx.add("chosen", true);
}
- else
+ else if (unlikely(trace_idx.trace_started()))
{
trace_idx.add("chosen", false);
if (found_records == HA_POS_ERROR)
@@ -10872,6 +11152,50 @@ uint SEL_ARG::get_max_key_part() const
}
+/**
+ Compute the number of equality (eq_range) groups in the tree
+
+ This is used by cost_group_min_max() to calculate the number of
+ groups in the SEL_TREE
+
+ @param group_key_parts number of key parts that must be equal
+
+ @return < 0 Not known
+ @return >= 0 Number of groups
+*/
+
+int SEL_ARG::number_of_eq_groups(uint group_key_parts) const
+{
+ int elements= 0;
+ SEL_ARG const *cur;
+
+ if (part > group_key_parts-1 || type != KEY_RANGE)
+ return -1;
+
+ cur= first();
+ do
+ {
+ if ((cur->min_flag | cur->max_flag) &
+ (NO_MIN_RANGE | NO_MAX_RANGE | NEAR_MIN | NEAR_MAX | GEOM_FLAG))
+ return -1;
+ if (cur->min_value != cur->max_value && !cur->min_max_are_equal())
+ return -1;
+ if (part != group_key_parts -1)
+ {
+ int tmp;
+ if (!next_key_part)
+ return -1;
+ if ((tmp= next_key_part->number_of_eq_groups(group_key_parts)) < 0)
+ return -1;
+ elements+= tmp;
+ }
+ else
+ elements++;
+ } while ((cur= cur->next));
+ return elements;
+}
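For intuition, the value being counted is the number of distinct equality prefixes over the grouping key parts; a simplified standalone sketch (plain pairs instead of the SEL_ARG graph):

  #include <cstdio>
  #include <set>
  #include <utility>
  #include <vector>

  int main()
  {
    // Invented single-point ranges over the first two key parts of an
    // index (a,b): (a=1,b=1), (a=1,b=2), (a=2,b=7).
    std::vector<std::pair<int,int>> eq_prefixes= { {1,1}, {1,2}, {2,7} };
    std::set<std::pair<int,int>> groups(eq_prefixes.begin(), eq_prefixes.end());
    // Three distinct equality prefixes => three groups for GROUP BY a,b.
    // Any non-equality range (e.g. b > 3) would make the real function
    // return -1 instead.
    printf("eq groups: %zu\n", groups.size());
    return 0;
  }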
+
+
/*
Remove the SEL_ARG graph elements which have part > max_part.
@@ -10924,8 +11248,8 @@ void prune_sel_arg_graph(SEL_ARG *sel_arg, uint max_part)
@return
tree pointer The tree after processing,
- NULL If it was not possible to reduce the weight of the tree below the
- limit.
+ NULL If it was not possible to reduce the weight of the tree below
+ the limit.
*/
SEL_ARG *enforce_sel_arg_weight_limit(RANGE_OPT_PARAM *param, uint keyno,
@@ -10961,7 +11285,7 @@ SEL_ARG *enforce_sel_arg_weight_limit(RANGE_OPT_PARAM *param, uint keyno,
uint weight2= sel_arg? sel_arg->weight : 0;
- if (weight2 != weight1)
+ if (unlikely(weight2 != weight1 && param->thd->trace_started()))
{
Json_writer_object wrapper(param->thd);
Json_writer_object obj(param->thd, "enforce_sel_arg_weight_limit");
@@ -10970,8 +11294,9 @@ SEL_ARG *enforce_sel_arg_weight_limit(RANGE_OPT_PARAM *param, uint keyno,
else
obj.add("pseudo_index", field->field_name);
- obj.add("old_weight", (longlong)weight1);
- obj.add("new_weight", (longlong)weight2);
+ obj.
+ add("old_weight", (longlong)weight1).
+ add("new_weight", (longlong)weight2);
}
return sel_arg;
}
@@ -10995,12 +11320,16 @@ bool sel_arg_and_weight_heuristic(RANGE_OPT_PARAM *param, SEL_ARG *key1,
ulong max_weight= param->thd->variables.optimizer_max_sel_arg_weight;
if (max_weight && key1->weight + key1->elements*key2->weight > max_weight)
{
- Json_writer_object wrapper(param->thd);
- Json_writer_object obj(param->thd, "sel_arg_weight_heuristic");
- obj.add("key1_field", key1->field->field_name);
- obj.add("key2_field", key2->field->field_name);
- obj.add("key1_weight", (longlong)key1->weight);
- obj.add("key2_weight", (longlong)key2->weight);
+ if (unlikely(param->thd->trace_started()))
+ {
+ Json_writer_object wrapper(param->thd);
+ Json_writer_object obj(param->thd, "sel_arg_weight_heuristic");
+ obj.
+ add("key1_field", key1->field->field_name).
+ add("key2_field", key2->field->field_name).
+ add("key1_weight", (longlong)key1->weight).
+ add("key2_weight", (longlong)key2->weight);
+ }
return true; // Discard key2
}
return false;
@@ -11490,6 +11819,26 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
}
#endif
+
+/**
+ Check if first key part has only one value
+
+ @retval 1 yes
+ @retval 0 no
+*/
+
+static bool check_if_first_key_part_has_only_one_value(SEL_ARG *arg)
+{
+ if (arg->left != &null_element || arg->right != &null_element)
+ return 0; // Multiple key values
+ if ((arg->min_flag | arg->max_flag) & (NEAR_MIN | NEAR_MAX))
+ return 0;
+ if (unlikely(arg->type != SEL_ARG::KEY_RANGE)) // Not a valid range
+ return 0;
+ return arg->min_value == arg->max_value || !arg->cmp_min_to_max(arg);
+}
+
+
/*
Calculate cost and E(#rows) for a given index and intervals tree
@@ -11518,13 +11867,15 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
*/
static
-ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
+ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit,
+ bool index_only,
SEL_ARG *tree, bool update_tbl_stats,
uint *mrr_flags, uint *bufsize, Cost_estimate *cost,
bool *is_ror_scan)
{
SEL_ARG_RANGE_SEQ seq;
- RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
+ RANGE_SEQ_IF seq_if=
+ {NULL, sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
handler *file= param->table->file;
ha_rows rows= HA_POS_ERROR;
uint keynr= param->real_keynr[idx];
@@ -11551,7 +11902,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
param->max_key_parts=0;
seq.is_ror_scan= TRUE;
- if (file->index_flags(keynr, 0, TRUE) & HA_KEY_SCAN_NOT_ROR)
+ if (param->table->key_info[keynr].index_flags & HA_KEY_SCAN_NOT_ROR)
seq.is_ror_scan= FALSE;
*mrr_flags= param->force_default_mrr? HA_MRR_USE_DEFAULT_IMPL: 0;
@@ -11564,9 +11915,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
// Passing wrong second argument to index_flags() makes no difference for
// most storage engines but might be an issue for MyRocks with certain
// datatypes.
+ // Note that HA_KEYREAD_ONLY implies that this is not a clustered index
if (index_only &&
- (file->index_flags(keynr, param->max_key_parts, 1) & HA_KEYREAD_ONLY) &&
- !(file->index_flags(keynr, param->max_key_parts, 1) & HA_CLUSTERED_INDEX))
+ (file->index_flags(keynr, param->max_key_parts, 1) & HA_KEYREAD_ONLY))
*mrr_flags |= HA_MRR_INDEX_ONLY;
if (param->thd->lex->sql_command != SQLCOM_SELECT)
@@ -11579,7 +11930,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
*/
if (param->table->pos_in_table_list->is_non_derived())
rows= file->multi_range_read_info_const(keynr, &seq_if, (void*)&seq, 0,
- bufsize, mrr_flags, cost);
+ bufsize, mrr_flags, limit, cost);
param->quick_rows[keynr]= rows;
if (rows != HA_POS_ERROR)
{
@@ -11592,24 +11943,28 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
This check is needed as sometimes that table statistics or range
estimates may be slightly out of sync.
*/
- rows= table_records;
- set_if_bigger(rows, 1);
+ rows= MY_MAX(table_records, 1);
param->quick_rows[keynr]= rows;
}
param->possible_keys.set_bit(keynr);
if (update_tbl_stats)
{
+ TABLE::OPT_RANGE *range= param->table->opt_range + keynr;
param->table->opt_range_keys.set_bit(keynr);
- param->table->opt_range[keynr].key_parts= param->max_key_parts;
- param->table->opt_range[keynr].ranges= param->range_count;
- param->table->opt_range_condition_rows=
- MY_MIN(param->table->opt_range_condition_rows, rows);
- param->table->opt_range[keynr].rows= rows;
- param->table->opt_range[keynr].cost= cost->total_cost();
- if (param->table->file->is_clustering_key(keynr))
- param->table->opt_range[keynr].index_only_cost= 0;
- else
- param->table->opt_range[keynr].index_only_cost= cost->index_only_cost();
+ range->key_parts= param->max_key_parts;
+ range->ranges= param->range_count;
+ param->table->set_opt_range_condition_rows(rows);
+ range->selectivity= (rows ?
+ (param->table->opt_range_condition_rows /
+ rows) :
+ 1.0); // ok as rows is 0
+ range->rows= rows;
+ range->cost= *cost;
+ range->max_index_blocks= file->index_blocks(keynr, range->ranges,
+ rows);
+ range->max_row_blocks= MY_MIN(file->row_blocks(), rows * file->stats.block_size / IO_SIZE);
+ range->first_key_part_has_only_one_value=
+ check_if_first_key_part_has_only_one_value(tree);
}
}
@@ -11642,6 +11997,8 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
*is_ror_scan= seq.is_ror_scan;
DBUG_PRINT("exit", ("Records: %lu", (ulong) rows));
+ DBUG_ASSERT(rows == HA_POS_ERROR ||
+ rows <= MY_MAX(param->table->stat_records(), 1));
DBUG_RETURN(rows); //psergey-merge:todo: maintain first_null_comp.
}
@@ -13626,6 +13983,12 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
The group-by list is a permutation of the select attributes, according
to their order in the index.
+ EXAMPLES of handled queries
+ select max(keypart2) from t1 group by keypart1
+ select max(keypart2) from t1 where keypart2 <= const group by keypart1
+ select distinct keypart1 from table;
+ select count(distinct keypart1) from table;
+
TODO
- What happens if the query groups by the MIN/MAX field, and there is no
other field as in: "select MY_MIN(a) from t1 group by a" ?
@@ -13679,11 +14042,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
cause= "not single_table";
else if (join->select_lex->olap == ROLLUP_TYPE) /* Check (B3) for ROLLUP */
cause= "rollup";
- else if (table->s->keys == 0) /* There are no indexes to use. */
+ else if (table->s->keys == 0) // There are no indexes to use.
cause= "no index";
else if (join->conds && join->conds->used_tables()
- & OUTER_REF_TABLE_BIT) /* Cannot execute with correlated conditions. */
+ & OUTER_REF_TABLE_BIT) // Cannot execute with correlated conditions.
cause= "correlated conditions";
+ else if (table->stat_records() == 0)
+ cause= "Empty table"; // Exit now, records=0 messes up cost computations
if (cause)
{
@@ -13697,7 +14062,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
(!join->select_distinct) &&
!is_agg_distinct)
{
- trace_group.add("chosen", false).add("cause","no group by or distinct");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.add("chosen", false).add("cause","no group by or distinct");
DBUG_RETURN(NULL);
}
/* Analyze the query in more detail. */
@@ -13722,8 +14088,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
continue;
else
{
- trace_group.add("chosen", false)
- .add("cause", "not applicable aggregate function");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("chosen", false).
+ add("cause", "not applicable aggregate function");
DBUG_RETURN(NULL);
}
@@ -13735,15 +14103,19 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
min_max_arg_item= (Item_field*) expr;
else if (! min_max_arg_item->eq(expr, 1))
{
- trace_group.add("chosen", false)
- .add("cause", "arguments different in min max function");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("chosen", false).
+ add("cause", "arguments different in min max function");
DBUG_RETURN(NULL);
}
}
else
{
- trace_group.add("chosen", false)
- .add("cause", "no field item in min max function");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("chosen", false).
+ add("cause", "no field item in min max function");
DBUG_RETURN(NULL);
}
}
@@ -13752,8 +14124,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
/* Check (SA7). */
if (is_agg_distinct && (have_max || have_min))
{
- trace_group.add("chosen", false)
- .add("cause", "have both agg distinct and min max");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("chosen", false).
+ add("cause", "have both agg distinct and min max");
DBUG_RETURN(NULL);
}
@@ -13765,8 +14139,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
if (item->real_item()->type() != Item::FIELD_ITEM)
{
- trace_group.add("chosen", false)
- .add("cause", "distinct field is expression");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("chosen", false).
+ add("cause", "distinct field is expression");
DBUG_RETURN(NULL);
}
}
@@ -13778,8 +14154,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
if ((*tmp_group->item)->real_item()->type() != Item::FIELD_ITEM)
{
- trace_group.add("chosen", false)
- .add("cause", "group field is expression");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("chosen", false).
+ add("cause", "group field is expression");
DBUG_RETURN(NULL);
}
elements_in_group++;
@@ -13874,7 +14252,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
does not qualify as covering in our case. If this is the case, below
we check that all query fields are indeed covered by 'cur_index'.
*/
- if (cur_index_info->user_defined_key_parts == table->actual_n_key_parts(cur_index_info)
+ if (cur_index_info->user_defined_key_parts ==
+ table->actual_n_key_parts(cur_index_info)
&& pk < MAX_KEY && cur_index != pk &&
(table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
{
@@ -13917,7 +14296,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
first Item? If so, then why? What is the array for?
*/
/* Above we already checked that all group items are fields. */
- DBUG_ASSERT((*tmp_group->item)->real_item()->type() == Item::FIELD_ITEM);
+ DBUG_ASSERT((*tmp_group->item)->real_item()->type() ==
+ Item::FIELD_ITEM);
Item_field *group_field= (Item_field *) (*tmp_group->item)->real_item();
if (group_field->field->eq(cur_part->field))
{
@@ -14146,6 +14526,17 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
if ((cur_index_tree= tree->keys[cur_param_idx]))
{
cur_quick_prefix_records= param->quick_rows[cur_index];
+ if (!cur_quick_prefix_records)
+ {
+ /*
+ Non-constant table has a range with rows=0. Can happen e.g. for
+ Merge tables. Regular range access will be just as good as loose
+ scan.
+ */
+ if (unlikely(trace_idx.trace_started()))
+ trace_idx.add("aborting_search", "range with rows=0");
+ DBUG_RETURN(NULL);
+ }
if (unlikely(cur_index_tree && thd->trace_started()))
{
Json_writer_array trace_range(thd, "ranges");
@@ -14205,8 +14596,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
Field::itMBR : Field::itRAW,
&has_min_max_fld, &has_other_fld))
{
- trace_group.add("usable", false)
- .add("cause", "unsupported predicate on agg attribute");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("usable", false).
+ add("cause", "unsupported predicate on agg attribute");
DBUG_RETURN(NULL);
}
@@ -14215,8 +14608,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
*/
if (is_agg_distinct && table->file->is_clustering_key(index))
{
- trace_group.add("usable", false)
- .add("cause", "index is clustered");
+ if (unlikely(trace_group.trace_started()))
+ trace_group.
+ add("usable", false).
+ add("cause", "index is clustered");
DBUG_RETURN(NULL);
}
@@ -14237,11 +14632,31 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
read_plan->read_cost= best_read_cost;
read_plan->records= best_records;
- if (read_time < best_read_cost && is_agg_distinct)
+ if (is_agg_distinct)
{
- trace_group.add("index_scan", true);
- read_plan->read_cost= 0;
- read_plan->use_index_scan();
+ double best_cost, duplicate_removal_cost;
+ ulonglong records;
+ handler *file= table->file;
+
+ /* Calculate cost of distinct scan on index */
+ if (best_index_tree && read_plan->quick_prefix_records)
+ records= read_plan->quick_prefix_records;
+ else
+ records= table->stat_records();
+
+ best_cost= file->cost(file->ha_key_scan_time(index, records));
+ /* We only have 'best_records' left after duplication elimination */
+ best_cost+= best_records * WHERE_COST_THD(thd);
+ duplicate_removal_cost= (DUPLICATE_REMOVAL_COST * records);
+
+ if (best_cost < read_plan->read_cost + duplicate_removal_cost)
+ {
+ read_plan->read_cost= best_cost;
+ read_plan->use_index_scan();
+ trace_group.
+ add("scan_cost", best_cost).
+ add("index_scan", true);
+ }
}
DBUG_PRINT("info",
@@ -14712,13 +15127,42 @@ get_field_keypart(KEY *index, Field *field)
have_min [in] True if there is a MIN function
have_max [in] True if there is a MAX function
read_cost [out] The cost to retrieve rows via this quick select
- records [out] The number of rows retrieved
+ out_records [out] The number of rows retrieved
DESCRIPTION
This method computes the access cost of a TRP_GROUP_MIN_MAX instance and
the number of rows returned.
+ The algorithm used for finding the rows is:
+
+ For each range (if no ranges, the range is the whole table)
+ Do an index search for the start of the range
+ As long as the found key is within the range
+ If the found row matches the where clause, use the row, otherwise skip it
+ Scan index for next group, jumping over all identical keys, done in
+ QUICK_GROUP_MIN_MAX_SELECT::next_prefix
+ If the engine does not support a native next_prefix, we will
+ either scan the index for the next value or do a new index dive
+ with 'find next bigger key'.
+
+ When using MIN() and MAX() in the query, the calls to the storage engine
+ are as follows for each group:
+ Assuming kp1 in ('abc','def','ghi') and kp2 between 1000 and 2000
+
+ read_key('abc', HA_READ_KEY_OR_NEXT)
+ In case of MIN() we do:
+ read_key('abc','1000', HA_READ_KEY_OR_NEXT)
+ In case of MAX() we do:
+ read_key('abc','2000', HA_READ_PREFIX_LAST_OR_PREV)
+ In the following code we will assume that the MIN key will be in
+ the same block as the first key read.
+ (We should try to optimize away the extra call for MAX() at some
+ point).
+
NOTES
+ See get_best_group_min_max() for the kinds of queries for which this
+ function is called.
+
The cost computation distinguishes several cases:
1) No equality predicates over non-group attributes (thus no key_infix).
If groups are bigger than blocks on the average, then we assume that it
@@ -14760,104 +15204,108 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
uint group_key_parts, SEL_TREE *range_tree,
SEL_ARG *index_tree, ha_rows quick_prefix_records,
bool have_min, bool have_max,
- double *read_cost, ha_rows *records)
+ double *read_cost, ha_rows *out_records)
{
- ha_rows table_records;
+ uint key_length;
+ ha_rows records;
ha_rows num_groups;
ha_rows num_blocks;
- uint keys_per_block;
ha_rows keys_per_group;
- ha_rows keys_per_subgroup; /* Average number of keys in sub-groups */
- /* formed by a key infix. */
- double p_overlap; /* Probability that a sub-group overlaps two blocks. */
double quick_prefix_selectivity;
- double io_cost;
+ ulonglong io_cost;
+ handler *file= table->file;
DBUG_ENTER("cost_group_min_max");
- table_records= table->stat_records();
- /* Assume block is 75 % full */
- keys_per_block= (uint) (table->file->stats.block_size * 3 / 4 /
- (index_info->key_length + table->file->ref_length)
- + 1);
- num_blocks= (ha_rows)(table_records / keys_per_block) + 1;
+ /* Same code as in handler::key_read_time() */
+ records= table->stat_records();
+ key_length= (index_info->key_length + file->ref_length);
/* Compute the number of keys in a group. */
if (!group_key_parts)
{
/* Summary over the whole table */
- keys_per_group= table_records;
+ keys_per_group= MY_MAX(records,1);
}
else
{
keys_per_group= (ha_rows) index_info->actual_rec_per_key(group_key_parts -
1);
+ if (keys_per_group == 0) /* If there are no statistics, try to guess */
+ {
+ /* each group contains 10% of all records */
+ keys_per_group= (records / 10) + 1;
+ }
}
-
- if (keys_per_group == 0) /* If there is no statistics try to guess */
- /* each group contains 10% of all records */
- keys_per_group= (table_records / 10) + 1;
- num_groups= (table_records / keys_per_group) + 1;
+ if (keys_per_group > 1)
+ num_groups= (records / keys_per_group) + 1;
+ else
+ num_groups= records;
/* Apply the selectivity of the quick select for group prefixes. */
if (range_tree && (quick_prefix_records != HA_POS_ERROR))
{
+ int groups;
quick_prefix_selectivity= (double) quick_prefix_records /
- (double) table_records;
+ (double) records;
num_groups= (ha_rows) rint(num_groups * quick_prefix_selectivity);
- set_if_bigger(num_groups, 1);
- }
+ records= quick_prefix_records;
+
+ /*
+ Try to handle cases like
+ WHERE a in (1,2,3) GROUP BY a
- if (used_key_parts > group_key_parts)
- { /*
- Compute the probability that two ends of a subgroup are inside
- different blocks.
+ If all ranges are eq_ranges for the group_key_parts we can use
+ this as the number of groups.
*/
- keys_per_subgroup= (ha_rows) index_info->actual_rec_per_key(used_key_parts - 1);
- if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */
- p_overlap= 1.0; /* a block, it will overlap at least two blocks. */
+ groups= index_tree->number_of_eq_groups(group_key_parts);
+ if (groups > 0)
+ num_groups= groups;
else
{
- double blocks_per_group= (double) num_blocks / (double) num_groups;
- p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group;
- p_overlap= MY_MIN(p_overlap, 1.0);
+ /*
+ Expect at least as many groups as there are ranges in the index
+
+ This is mostly relevant for queries with few records, which is
+ something we have a lot of in our test suites.
+ In theory it is possible to scan the index_tree and, for cases
+ where all ranges are eq ranges, calculate the exact number
+ of groups. This is probably overkill, so for now we estimate
+ the lower bound on the number of groups by the number of range
+ elements in the tree.
+ */
+ set_if_bigger(num_groups, MY_MAX(index_tree->elements, 1));
}
- io_cost= (double) MY_MIN(num_groups * (1 + p_overlap), num_blocks);
+ /* There cannot be more groups than matched records */
+ set_if_smaller(num_groups, quick_prefix_records);
}
- else
- io_cost= (keys_per_group > keys_per_block) ?
- (have_min && have_max) ? (double) (num_groups + 1) :
- (double) num_groups :
- (double) num_blocks;
+ DBUG_ASSERT(num_groups <= records);
+
+ /* Calculate the number of blocks we will touch for the table or range scan */
+ num_blocks= (records * key_length / INDEX_BLOCK_FILL_FACTOR_DIV *
+ INDEX_BLOCK_FILL_FACTOR_MUL) / file->stats.block_size + 1;
+
+ io_cost= (have_max) ? num_groups * 2 : num_groups;
+ set_if_smaller(io_cost, num_blocks);
/*
CPU cost must be comparable to that of an index scan as computed
in SQL_SELECT::test_quick_select(). When the groups are small,
e.g. for a unique index, using index scan will be cheaper since it
reads the next record without having to re-position to it on every
- group. To make the CPU cost reflect this, we estimate the CPU cost
- as the sum of:
- 1. Cost for evaluating the condition (similarly as for index scan).
- 2. Cost for navigating the index structure (assuming a b-tree).
- Note: We only add the cost for one comparision per block. For a
- b-tree the number of comparisons will be larger.
- TODO: This cost should be provided by the storage engine.
+ group.
*/
- const double tree_traversal_cost=
- ceil(log(static_cast<double>(table_records))/
- log(static_cast<double>(keys_per_block))) *
- 1/(2*TIME_FOR_COMPARE);
-
- const double cpu_cost= num_groups *
- (tree_traversal_cost + 1/TIME_FOR_COMPARE_IDX);
-
- *read_cost= io_cost + cpu_cost;
- *records= num_groups;
+ uint keyno= (uint) (index_info - table->key_info);
+ *read_cost= file->cost(file->ha_keyread_and_compare_time(keyno,
+ (ulong) num_groups,
+ num_groups,
+ io_cost));
+ *out_records= num_groups;
DBUG_PRINT("info",
- ("table rows: %lu keys/block: %u keys/group: %lu "
+ ("rows: %lu keys/group: %lu "
"result rows: %lu blocks: %lu",
- (ulong) table_records, keys_per_block, (ulong) keys_per_group,
- (ulong) *records, (ulong) num_blocks));
+ (ulong) records, (ulong) keys_per_group,
+ (ulong) *out_records, (ulong) num_blocks));
DBUG_VOID_RETURN;
}
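To make the shape of the new estimate above concrete, here is a stand-alone re-computation with invented numbers (the ~75% fill factor, block size and row counts are assumptions for the example, not the server's INDEX_BLOCK_FILL_FACTOR_* or engine values):

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  int main()
  {
    // Illustrative inputs for one index.
    uint64_t records=        1000000;  // rows in the table
    uint64_t keys_per_group= 0;        // rec_per_key says "unknown"
    uint64_t key_length=     24;       // key + row reference, in bytes
    uint64_t block_size=     8192;     // index block size, in bytes
    bool     have_max=       true;     // the query contains MAX()

    if (keys_per_group == 0)           // no statistics: guess 10% per group
      keys_per_group= records / 10 + 1;

    uint64_t num_groups= keys_per_group > 1 ? records / keys_per_group + 1
                                            : records;

    // Blocks touched by scanning the keys, assuming blocks are ~75% full.
    uint64_t num_blocks= records * key_length * 4 / 3 / block_size + 1;

    // One index dive per group, two if we also position for MAX(),
    // but never more than the number of blocks a plain scan would read.
    uint64_t io_cost= have_max ? num_groups * 2 : num_groups;
    io_cost= std::min(io_cost, num_blocks);

    std::printf("groups=%llu blocks=%llu io_cost=%llu\n",
                (unsigned long long) num_groups,
                (unsigned long long) num_blocks,
                (unsigned long long) io_cost);
  }

The final read cost in the patch then goes through handler::ha_keyread_and_compare_time(), which this sketch does not model.
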
@@ -14897,7 +15345,8 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
group_prefix_len, group_key_parts,
used_key_parts, index_info, index,
read_cost, records, key_infix_len,
- key_infix, parent_alloc, is_index_scan);
+ key_infix, parent_alloc,
+ is_index_scan);
if (!quick)
DBUG_RETURN(NULL);
diff --git a/sql/opt_range.h b/sql/opt_range.h
index a505cd09ea4..b3a7939c869 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -302,6 +302,7 @@ class SEL_ARG :public Sql_alloc
{
static int sel_cmp(Field *field, uchar *a, uchar *b, uint8 a_flag,
uint8 b_flag);
+ bool min_max_are_equal() const;
public:
uint8 min_flag,max_flag,maybe_flag;
uint8 part; // Which key part
@@ -401,6 +402,7 @@ public:
return false;
return true;
}
+ int number_of_eq_groups(uint group_key_parts) const;
inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; }
inline void maybe_smaller() { maybe_flag=1; }
/* Return true iff it's a single-point null interval */
@@ -1109,6 +1111,13 @@ public:
*/
uint used_key_parts;
+ /*
+ Set to 1 if we used group by optimization to calculate number of rows
+ in the result, stored in table->opt_range_condition_rows.
+ This is only used for asserts.
+ */
+ bool group_by_optimization_used;
+
QUICK_SELECT_I();
virtual ~QUICK_SELECT_I() = default;;
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index 86ed442814c..f2d536cd47b 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -188,6 +188,7 @@
#include "mariadb.h"
#include "sql_select.h"
#include "opt_trace.h"
+#include "optimizer_defaults.h"
/* Info on a splitting field */
struct SplM_field_info
@@ -245,10 +246,10 @@ public:
List<SplM_plan_info> plan_cache;
/* Cost of best execution plan for join when nothing is pushed */
double unsplit_cost;
+ /* Split operation cost (result from spl_postjoin_oper_cost()) */
+ double unsplit_oper_cost;
/* Cardinality of T when nothing is pushed */
double unsplit_card;
- /* Lastly evaluated execution plan for 'join' with pushed equalities */
- SplM_plan_info *last_plan;
SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
};
@@ -665,20 +666,28 @@ add_ext_keyuses_for_splitting_field(Dynamic_array<KEYUSE_EXT> *ext_keyuses,
/*
@brief
Cost of the post join operation used in specification of splittable table
+ This does not include the cost of creating the temporary table as this
+ operation can be executed many times for the same temporary table.
*/
static
double spl_postjoin_oper_cost(THD *thd, double join_record_count, uint rec_len)
{
double cost;
- cost= get_tmp_table_write_cost(thd, join_record_count,rec_len) *
- join_record_count; // cost to fill tmp table
- cost+= get_tmp_table_lookup_cost(thd, join_record_count,rec_len) *
- join_record_count; // cost to perform post join operation used here
- cost+= get_tmp_table_lookup_cost(thd, join_record_count, rec_len) +
- (join_record_count == 0 ? 0 :
- join_record_count * log2 (join_record_count)) *
- SORT_INDEX_CMP_COST; // cost to perform sorting
+ TMPTABLE_COSTS tmp_cost= get_tmp_table_costs(thd, join_record_count,
+ rec_len, 0, 1);
+ /* cost to fill tmp table */
+ cost= tmp_cost.write * join_record_count;
+ /* cost to perform post join operation used here */
+ cost+= tmp_cost.lookup * join_record_count;
+ /* cost to perform sorting */
+ /* QQQ
+ We should use cost_of_filesort() for computing the sort cost.
+ Do we always perform sorting? If not, this should be done conditionally
+ */
+ cost+= ((join_record_count == 0 ? 0 :
+ join_record_count * log2 (join_record_count)) *
+ SORT_INDEX_CMP_COST);
return cost;
}
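A back-of-the-envelope version of the formula above, with invented unit costs standing in for what get_tmp_table_costs() and SORT_INDEX_CMP_COST provide:

  #include <cmath>
  #include <cstdio>

  // Rough restatement of the post-join cost with assumed per-row constants.
  static double postjoin_oper_cost(double rows, double write_cost,
                                   double lookup_cost, double sort_cmp_cost)
  {
    double cost= write_cost * rows;                  // fill the temp table
    cost+= lookup_cost * rows;                       // post-join lookups
    cost+= (rows == 0 ? 0 : rows * std::log2(rows))  // sort the result
           * sort_cmp_cost;
    return cost;
  }

  int main()
  {
    // 10000 joined rows, made-up unit costs.
    std::printf("%.2f\n", postjoin_oper_cost(10000, 0.01, 0.005, 0.001));
  }

As the QQQ note says, cost_of_filesort() would be the more precise way to price the sort term.
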
@@ -710,7 +719,6 @@ void JOIN::add_keyuses_for_splitting()
size_t idx;
KEYUSE_EXT *keyuse_ext;
KEYUSE_EXT keyuse_ext_end;
- double oper_cost;
uint rec_len;
uint added_keyuse_count;
TABLE *table= select_lex->master_unit()->derived->table;
@@ -733,14 +741,20 @@ void JOIN::add_keyuses_for_splitting()
if (ext_keyuses_for_splitting->push(keyuse_ext_end))
goto err;
- spl_opt_info->unsplit_card= join_record_count;
+ /*
+ Use the number of rows that was computed by
+ TABLE_LIST::fetch_number_of_rows():
+ */
+ spl_opt_info->unsplit_card=
+ rows2double(select_lex->master_unit()->derived->table->stat_records());
rec_len= table->s->rec_buff_length;
- oper_cost= spl_postjoin_oper_cost(thd, join_record_count, rec_len);
-
- spl_opt_info->unsplit_cost= best_positions[table_count-1].read_time +
- oper_cost;
+ spl_opt_info->unsplit_oper_cost= spl_postjoin_oper_cost(thd,
+ join_record_count,
+ rec_len);
+ spl_opt_info->unsplit_cost= (best_positions[table_count-1].read_time +
+ spl_opt_info->unsplit_oper_cost);
if (!(save_qep= new Join_plan_state(table_count + 1)))
goto err;
@@ -872,7 +886,7 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
splitting the function set it as the true plan of materialization
of the table T.
The function caches the found plans for materialization of table T
- together if the info what key was used for splitting. Next time when
+ together with the info what key was used for splitting. Next time when
the optimizer prefers to use the same key the plan is taken from
the cache of plans
@@ -897,6 +911,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
SplM_plan_info *spl_plan= 0;
uint best_key= 0;
uint best_key_parts= 0;
+ bool chosen, already_printed;
/*
Check whether there are keys that can be used to join T employing splitting
@@ -954,7 +969,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
}
while (keyuse_ext->table == table);
}
- spl_opt_info->last_plan= 0;
+ chosen= 0;
if (best_table)
{
/*
@@ -972,7 +987,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
table_map all_table_map= (((table_map) 1) << join->table_count) - 1;
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
best_key, remaining_tables, true);
- choose_plan(join, all_table_map & ~join->const_table_map);
+ choose_plan(join, all_table_map & ~join->const_table_map, 0);
/*
Check that the chosen plan is really a splitting plan.
@@ -1003,57 +1018,74 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_opt_info->unsplit_card : 1);
uint rec_len= table->s->rec_buff_length;
-
double split_card= spl_opt_info->unsplit_card * spl_plan->split_sel;
- double oper_cost= split_card *
- spl_postjoin_oper_cost(thd, split_card, rec_len);
- spl_plan->cost= join->best_positions[join->table_count-1].read_time +
- + oper_cost;
+ double oper_cost= (split_card *
+ spl_postjoin_oper_cost(thd, split_card, rec_len));
+ spl_plan->cost= (join->best_positions[join->table_count-1].read_time +
+ oper_cost);
+
+ chosen= (record_count * spl_plan->cost + COST_EPS <
+ spl_opt_info->unsplit_cost);
if (unlikely(thd->trace_started()))
{
Json_writer_object wrapper(thd);
- Json_writer_object find_trace(thd, "best_splitting");
- find_trace.add("table", best_table->alias.c_ptr());
- find_trace.add("key", best_table->key_info[best_key].name);
- find_trace.add("record_count", record_count);
- find_trace.add("cost", spl_plan->cost);
- find_trace.add("unsplit_cost", spl_opt_info->unsplit_cost);
+ Json_writer_object find_trace(thd, "split_materialized");
+ find_trace.
+ add("table", best_table->alias.c_ptr()).
+ add("key", best_table->key_info[best_key].name).
+ add("org_cost",join->best_positions[join->table_count-1].read_time).
+ add("postjoin_cost", oper_cost).
+ add("one_splitting_cost", spl_plan->cost).
+ add("unsplit_postjoin_cost", spl_opt_info->unsplit_oper_cost).
+ add("unsplit_cost", spl_opt_info->unsplit_cost).
+ add("rows", split_card).
+ add("outer_rows", record_count).
+ add("total_splitting_cost", record_count * spl_plan->cost).
+ add("chosen", chosen);
}
memcpy((char *) spl_plan->best_positions,
(char *) join->best_positions,
sizeof(POSITION) * join->table_count);
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
best_key, remaining_tables, false);
+ already_printed= 1;
}
- if (spl_plan)
+ else
{
- if(record_count * spl_plan->cost < spl_opt_info->unsplit_cost - 0.01)
- {
- /*
- The best plan that employs splitting is cheaper than
- the plan without splitting
- */
- spl_opt_info->last_plan= spl_plan;
- }
+ chosen= (record_count * spl_plan->cost + COST_EPS <
+ spl_opt_info->unsplit_cost);
+ already_printed= 0;
}
}
/* Set the cost of the preferred materialization for this partial join */
- records= (ha_rows)spl_opt_info->unsplit_card;
- spl_plan= spl_opt_info->last_plan;
- if (spl_plan)
+ if (chosen)
{
+ /*
+ The best plan that employs splitting is cheaper than
+ the plan without splitting
+ */
+
startup_cost= record_count * spl_plan->cost;
- records= (ha_rows) (records * spl_plan->split_sel);
+ records= (ha_rows) (spl_opt_info->unsplit_card * spl_plan->split_sel);
- Json_writer_object trace(thd, "lateral_derived");
- trace.add("startup_cost", startup_cost);
- trace.add("splitting_cost", spl_plan->cost);
- trace.add("records", records);
+ if (unlikely(thd->trace_started()) && ! already_printed)
+ {
+ Json_writer_object trace(thd, "split_materialized");
+ trace.
+ add("one_splitting_cost", spl_plan->cost).
+ add("total_splitting_cost", startup_cost).
+ add("rows", records);
+ }
}
else
- startup_cost= spl_opt_info->unsplit_cost;
+ {
+ /* Restore original values */
+ startup_cost= spl_opt_info->unsplit_cost;
+ records= (ha_rows) spl_opt_info->unsplit_card;
+ spl_plan= 0;
+ }
return spl_plan;
}
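The decision in choose_best_splitting() now reduces to one comparison; a tiny stand-alone rendering of it with made-up numbers (record_count, the per-split cost and the COST_EPS value are assumptions for the example):

  #include <cstdio>

  int main()
  {
    // Assumed figures: 50 outer rows, each split execution costs 3.0,
    // materializing the whole derived table once costs 200.0.
    double record_count=   50.0;   // rows in the outer join prefix
    double one_split_cost=  3.0;   // cost of one split execution
    double unsplit_cost=  200.0;   // cost of unsplit materialization
    const double COST_EPS= 0.001;  // assumed tie-breaking epsilon

    bool chosen= record_count * one_split_cost + COST_EPS < unsplit_cost;
    std::printf("splitting %s (%.1f vs %.1f)\n",
                chosen ? "chosen" : "rejected",
                record_count * one_split_cost, unsplit_cost);
  }
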
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index e91b2ad14c3..1d0b1ff1874 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -35,6 +35,7 @@
#include "sql_test.h"
#include <my_bit.h>
#include "opt_trace.h"
+#include "optimizer_defaults.h"
/*
This file contains optimizations for semi-join subqueries.
@@ -448,7 +449,8 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred);
static bool convert_subq_to_jtbm(JOIN *parent_join,
Item_in_subselect *subq_pred, bool *remove);
static TABLE_LIST *alloc_join_nest(THD *thd);
-static uint get_tmp_table_rec_length(Ref_ptr_array p_list, uint elements);
+static uint get_tmp_table_rec_length(Ref_ptr_array p_list, uint elements,
+ bool *blobs_used);
bool find_eq_ref_candidate(TABLE *table, table_map sj_inner_tables);
static SJ_MATERIALIZATION_INFO *
at_sjmat_pos(const JOIN *join, table_map remaining_tables, const JOIN_TAB *tab,
@@ -664,17 +666,6 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
DBUG_RETURN(-1);
}
}
- /* Check if any table is not supporting comparable rowids */
- {
- List_iterator_fast<TABLE_LIST> li(select_lex->outer_select()->leaf_tables);
- TABLE_LIST *tbl;
- while ((tbl = li++))
- {
- TABLE *table= tbl->table;
- if (table && table->file->ha_table_flags() & HA_NON_COMPARABLE_ROWID)
- join->not_usable_rowid_map|= table->map;
- }
- }
DBUG_PRINT("info", ("Checking if subq can be converted to semi-join"));
/*
@@ -696,9 +687,10 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
11. It is first optimisation (the subquery could be moved from ON
clause during first optimisation and then be considered for SJ
on the second when it is too late)
- 12. All tables supports comparable rowids.
- This is needed for DuplicateWeedout strategy to work (which
- is the catch-all semi-join strategy so it must be applicable).
+
+ There are also other requirements which cannot be checked at this phase,
+ yet. They are checked later in convert_join_subqueries_to_semijoins(),
+ look for calls to block_conversion_to_sj().
*/
if (optimizer_flag(thd, OPTIMIZER_SWITCH_SEMIJOIN) &&
in_subs && // 1
@@ -713,8 +705,7 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
!((join->select_options | // 10
select_lex->outer_select()->join->select_options) // 10
& SELECT_STRAIGHT_JOIN) && // 10
- select_lex->first_cond_optimization && // 11
- join->not_usable_rowid_map == 0) // 12
+ select_lex->first_cond_optimization) // 11
{
DBUG_PRINT("info", ("Subquery is semi-join conversion candidate"));
@@ -910,8 +901,10 @@ bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs)
outer,
converted_from_in_predicate))
{
- trace_transform.add("possible", false);
- trace_transform.add("cause", "types mismatch");
+ if (unlikely(trace_transform.trace_started()))
+ trace_transform.
+ add("possible", false).
+ add("cause", "types mismatch");
DBUG_RETURN(FALSE);
}
}
@@ -933,8 +926,10 @@ bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs)
{
in_subs->types_allow_materialization= TRUE;
in_subs->sjm_scan_allowed= all_are_fields;
- trace_transform.add("sjm_scan_allowed", all_are_fields)
- .add("possible", true);
+ if (unlikely(trace_transform.trace_started()))
+ trace_transform.
+ add("sjm_scan_allowed", all_are_fields).
+ add("possible", true);
DBUG_PRINT("info",("subquery_types_allow_materialization: ok, allowed"));
DBUG_RETURN(TRUE);
}
@@ -1224,7 +1219,36 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
}
}
- if (join->select_options & SELECT_STRAIGHT_JOIN)
+ /*
+ Compute join->not_usable_rowid_map.
+ The idea is:
+ - DuplicateWeedout strategy requires that one is able to get the rowid
+ (call h->position()) for tables in the parent select. Obtained Rowid
+ values must be stable across table scans.
+ = Rowids are typically available. The only known exception is federatedx
+ tables.
+ - The optimizer requires that DuplicateWeedout strategy is always
+ applicable. It is the only strategy that is applicable for any join
+ order. The optimizer is not prepared for the situation where it has
+ constructed a join order and then it turns out that there's no semi-join
+ strategy that can be used for it.
+
+ Because of the above, we will not use semi-joins if the parent select has
+ tables which do not support rowids.
+ */
+ {
+ List_iterator_fast<TABLE_LIST> li(join->select_lex->leaf_tables);
+ TABLE_LIST *tbl;
+ while ((tbl = li++))
+ {
+ TABLE *table= tbl->table;
+ if (table && table->file->ha_table_flags() & HA_NON_COMPARABLE_ROWID)
+ join->not_usable_rowid_map|= table->map;
+ }
+ }
+
+ if (join->select_options & SELECT_STRAIGHT_JOIN ||
+ join->not_usable_rowid_map != 0)
{
/* Block conversion to semijoins for all candidates */
li.rewind();
@@ -1294,8 +1318,10 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
in_subq->get_select_lex()->select_number,
"IN (SELECT)", "semijoin");
- trace_transform.add("converted_to_semi_join", false)
- .add("cause", "subquery attached to the ON clause");
+ if (unlikely(trace_transform.trace_started()))
+ trace_transform.
+ add("converted_to_semi_join", false).
+ add("cause", "subquery attached to the ON clause");
break;
}
@@ -1307,9 +1333,10 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
if (join->table_count +
in_subq->unit->first_select()->join->table_count >= MAX_TABLES)
{
- trace_transform.add("converted_to_semi_join", false);
- trace_transform.add("cause",
- "table in parent join now exceeds MAX_TABLES");
+ if (unlikely(trace_transform.trace_started()))
+ trace_transform.
+ add("converted_to_semi_join", false).
+ add("cause", "table in parent join now exceeds MAX_TABLES");
break;
}
if (convert_subq_to_sj(join, in_subq))
@@ -1441,6 +1468,7 @@ void get_delayed_table_estimates(TABLE *table,
Item_in_subselect *item= table->pos_in_table_list->jtbm_subselect;
Table_function_json_table *table_function=
table->pos_in_table_list->table_function;
+ handler *file= table->file;
if (table_function)
{
@@ -1460,9 +1488,11 @@ void get_delayed_table_estimates(TABLE *table,
/* Calculate cost of scanning the temptable */
double data_size= COST_MULT(item->jtbm_record_count,
hash_sj_engine->tmp_table->s->reclength);
- /* Do like in handler::scan_time() */
- *scan_time= ((data_size/table->file->stats.block_size+2) *
- table->file->avg_io_cost());
+
+ /* Do like in handler::ha_scan_and_compare_time, but ignore the where cost */
+ *scan_time= ((data_size/IO_SIZE * table->file->DISK_READ_COST *
+ table->file->DISK_READ_RATIO) +
+ *out_rows * file->ROW_COPY_COST);
}
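A minimal sketch of the replacement scan estimate, with placeholder values for IO_SIZE and the engine cost factors (the real numbers come from the optimizer cost variables, not from this example):

  #include <cstdio>

  int main()
  {
    // Illustrative stand-ins for the engine cost factors.
    const double IO_SIZE=         4096;     // assumed I/O unit, bytes
    const double DISK_READ_COST=  10.0e-5;  // per IO_SIZE block read
    const double DISK_READ_RATIO= 0.20;     // fraction of reads hitting disk
    const double ROW_COPY_COST=   0.5e-5;   // per row copied out

    double rows=      20000;                // estimated temp-table rows
    double reclength= 64;                   // bytes per row
    double data_size= rows * reclength;

    double scan_time= data_size / IO_SIZE * DISK_READ_COST * DISK_READ_RATIO
                      + rows * ROW_COPY_COST;
    std::printf("scan_time=%.6f\n", scan_time);
  }
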
@@ -2490,8 +2520,7 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
!sj_nest->sj_subq_pred->is_correlated &&
sj_nest->sj_subq_pred->types_allow_materialization)
{
- join->emb_sjm_nest= sj_nest;
- if (choose_plan(join, all_table_map &~join->const_table_map))
+ if (choose_plan(join, all_table_map &~join->const_table_map, sj_nest))
DBUG_RETURN(TRUE); /* purecov: inspected */
/*
The best plan to run the subquery is now in join->best_positions,
@@ -2507,24 +2536,13 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
sjm->is_used= FALSE;
double subjoin_out_rows, subjoin_read_time;
- /*
- join->get_partial_cost_and_fanout(n_tables + join->const_tables,
- table_map(-1),
- &subjoin_read_time,
- &subjoin_out_rows);
- */
- join->get_prefix_cost_and_fanout(n_tables,
+ join->get_prefix_cost_and_fanout(n_tables,
&subjoin_read_time,
&subjoin_out_rows);
- sjm->materialization_cost.convert_from_cost(subjoin_read_time);
+ sjm->materialization_cost=subjoin_read_time;
sjm->rows_with_duplicates= sjm->rows= subjoin_out_rows;
- // Don't use the following list because it has "stale" items. use
- // ref_pointer_array instead:
- //
- //List<Item> &right_expr_list=
- // sj_nest->sj_subq_pred->unit->first_select()->item_list;
/*
Adjust output cardinality estimates. If the subquery has form
@@ -2557,8 +2575,12 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
int tableno;
double rows= 1.0;
while ((tableno = tm_it.next_bit()) != Table_map_iterator::BITMAP_END)
- rows= COST_MULT(rows,
- join->map2table[tableno]->table->opt_range_condition_rows);
+ {
+ ha_rows tbl_rows=join->map2table[tableno]->
+ table->opt_range_condition_rows;
+
+ rows= COST_MULT(rows, rows2double(tbl_rows));
+ }
sjm->rows= MY_MIN(sjm->rows, rows);
}
memcpy((uchar*) sjm->positions,
@@ -2568,33 +2590,43 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
/*
Calculate temporary table parameters and usage costs
*/
+ bool blobs_used;
uint rowlen= get_tmp_table_rec_length(subq_select->ref_pointer_array,
- subq_select->item_list.elements);
- double lookup_cost= get_tmp_table_lookup_cost(join->thd,
- subjoin_out_rows, rowlen);
- double write_cost= get_tmp_table_write_cost(join->thd,
- subjoin_out_rows, rowlen);
+ subq_select->item_list.elements,
+ &blobs_used);
+ TMPTABLE_COSTS cost= get_tmp_table_costs(join->thd,
+ subjoin_out_rows, rowlen,
+ blobs_used, 1);
+ double scan_cost, total_cost;
+ double row_copy_cost= ROW_COPY_COST_THD(thd);
/*
Let materialization cost include the cost to write the data into the
- temporary table:
+ temporary table. Note that smj->materialization_cost already includes
+ row copy and compare costs of finding the original row.
*/
- sjm->materialization_cost.add_io(subjoin_out_rows, write_cost);
-
+ sjm->materialization_cost+=subjoin_out_rows * cost.write + cost.create;
+
/*
Set the cost to do a full scan of the temptable (will need this to
- consider doing sjm-scan):
- */
- sjm->scan_cost.reset();
- sjm->scan_cost.add_io(sjm->rows, lookup_cost);
-
- sjm->lookup_cost.convert_from_cost(lookup_cost);
+ consider doing sjm-scan). See ha_scan_time() for the basics of
+ the calculations.
+ We don't need to check the where clause for each row, so no
+ WHERE_COST is needed.
+ */
+ scan_cost= (rowlen * (double) sjm->rows) / cost.block_size;
+ total_cost= (scan_cost * cost.cache_hit_ratio * cost.avg_io_cost +
+ TABLE_SCAN_SETUP_COST_THD(thd) +
+ row_copy_cost * sjm->rows);
+ sjm->scan_cost=total_cost;
+
+ /* When reading a row, we also have to check the where clause */
+ sjm->lookup_cost= cost.lookup + WHERE_COST_THD(thd);
sj_nest->sj_mat_info= sjm;
DBUG_EXECUTE("opt", print_sjm(sjm););
}
}
}
- join->emb_sjm_nest= NULL;
DBUG_RETURN(FALSE);
}
@@ -2616,11 +2648,13 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
Length of the temptable record, in bytes
*/
-static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements)
+static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements,
+ bool *blobs_used)
{
uint len= 0;
Item *item;
- //List_iterator<Item> it(items);
+
+ *blobs_used= 0;
for (uint i= 0; i < elements ; i++)
{
item = p_items[i];
@@ -2643,6 +2677,8 @@ static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements)
len += 8;
else
len += item->max_length;
+ if (item->max_length > MAX_FIELD_VARCHARLENGTH)
+ *blobs_used= 1;
break;
case DECIMAL_RESULT:
len += 10;
@@ -2658,46 +2694,62 @@ static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements)
/**
- The cost of a lookup into a unique hash/btree index on a temporary table
- with 'row_count' rows each of size 'row_size'.
+ The cost of a create, write and read into a unique hash/btree index on
+ a temporary table with 'row_count' rows each of size 'row_size'.
@param thd current query context
@param row_count number of rows in the temp table
@param row_size average size in bytes of the rows
- @return the cost of one lookup
-*/
-
-double
-get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size)
-{
- if (row_count > thd->variables.max_heap_table_size / (double) row_size)
- return (double) DISK_TEMPTABLE_LOOKUP_COST;
- else
- return (double) HEAP_TEMPTABLE_LOOKUP_COST;
-}
-
-/**
- The cost of writing a row into a temporary table with 'row_count' unique
- rows each of size 'row_size'.
+ @return The cost of using the temporary table
- @param thd current query context
- @param row_count number of rows in the temp table
- @param row_size average size in bytes of the rows
-
- @return the cost of writing one row
+ TODO:
+ This is an optimistic estimate. We are not taking into account:
+ - That we first write into memory and then overflow to disk.
+ - Whether binary trees would be used for heap tables.
+ - The additional cost of writing a row to memory/disk and possible
+ index reorganization.
*/
-double
-get_tmp_table_write_cost(THD *thd, double row_count, uint row_size)
+TMPTABLE_COSTS
+get_tmp_table_costs(THD *thd, double row_count, uint row_size, bool blobs_used,
+ bool add_copy_cost)
{
- double lookup_cost= get_tmp_table_lookup_cost(thd, row_count, row_size);
- /*
- TODO:
- This is an optimistic estimate. Add additional costs resulting from
- actually writing the row to memory/disk and possible index reorganization.
- */
- return lookup_cost;
+ TMPTABLE_COSTS cost;
+ /* From heap_prepare_hp_create_info(), assuming one hash key used */
+ row_size+= sizeof(char*)*2;
+ row_size= MY_ALIGN(MY_MAX(row_size, sizeof(char*)) + 1, sizeof(char*));
+
+ if (row_count > thd->variables.max_heap_table_size / (double) row_size ||
+ blobs_used)
+ {
+ double row_copy_cost= (add_copy_cost ?
+ tmp_table_optimizer_costs.row_copy_cost :
+ 0);
+ /* Disk based table */
+ cost.lookup= ((tmp_table_optimizer_costs.key_lookup_cost *
+ tmp_table_optimizer_costs.disk_read_ratio) +
+ row_copy_cost);
+ cost.write= cost.lookup;
+ cost.create= DISK_TEMPTABLE_CREATE_COST;
+ cost.block_size= DISK_TEMPTABLE_BLOCK_SIZE;
+ cost.avg_io_cost= tmp_table_optimizer_costs.disk_read_cost;
+ cost.cache_hit_ratio= tmp_table_optimizer_costs.disk_read_ratio;
+ }
+ else
+ {
+ /* Values are as they are in heap.h */
+ double row_copy_cost= (add_copy_cost ?
+ heap_optimizer_costs.row_copy_cost :
+ 0);
+ cost.lookup= HEAP_TEMPTABLE_LOOKUP_COST + row_copy_cost;
+ cost.write= cost.lookup;
+ cost.create= HEAP_TEMPTABLE_CREATE_COST;
+ cost.block_size= 1;
+ cost.avg_io_cost= 0;
+ cost.cache_hit_ratio= 0;
+ }
+ return cost;
}
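For illustration, a simplified stand-alone version of the heap-versus-disk decision that get_tmp_table_costs() makes; the struct and all unit costs here are invented, and only the branching condition mirrors the code above:

  #include <cstdio>

  struct TmpTableCosts     // toy mirror of the patch's TMPTABLE_COSTS idea
  {
    double lookup, write, create, block_size;
  };

  static TmpTableCosts tmp_table_costs(double row_count, unsigned row_size,
                                       bool blobs_used,
                                       double max_heap_table_size)
  {
    TmpTableCosts c;
    if (blobs_used ||
        row_count > max_heap_table_size / (double) row_size)
    {
      // Disk-based temporary table: assumed unit costs.
      c.lookup= 0.0035; c.write= 0.0035; c.create= 0.5; c.block_size= 8192;
    }
    else
    {
      // In-memory (HEAP) temporary table: cheaper lookups, no real I/O.
      c.lookup= 0.0010; c.write= 0.0010; c.create= 0.0; c.block_size= 1;
    }
    return c;
  }

  int main()
  {
    TmpTableCosts small= tmp_table_costs(1000,  64, false, 16*1024*1024);
    TmpTableCosts big=   tmp_table_costs(1e7,  512, false, 16*1024*1024);
    std::printf("small: lookup=%.4f  big: lookup=%.4f\n",
                small.lookup, big.lookup);
  }

The real function additionally accounts for row-copy cost, the hash-key overhead added to row_size, and the disk_read_ratio scaling.
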
@@ -2942,9 +2994,16 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
2. using strategy Z is cheaper, but it only removes
fanout from semijoin X.
3. We have no clue what to do about fanount of semi-join Y.
+
+ For the first iteration read_time will always be bigger than
+ *current_read_time (as the 'strategy' is an addition to the
+ chosen plan). If a strategy was picked
+ (dups_producing_tables & handled_fanout is true), then
+ *current_read_time is updated and the cost for the next
+ strategy can be smaller than *current_read_time.
*/
if ((dups_producing_tables & handled_fanout) ||
- (read_time < *current_read_time &&
+ (read_time + COST_EPS < *current_read_time &&
!(handled_fanout & pos->inner_tables_handled_with_other_sjs)))
{
DBUG_ASSERT(pos->sj_strategy != sj_strategy);
@@ -3142,9 +3201,9 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
mat_read_time=
COST_ADD(prefix_cost,
- COST_ADD(mat_info->materialization_cost.total_cost(),
+ COST_ADD(mat_info->materialization_cost,
COST_MULT(prefix_rec_count,
- mat_info->lookup_cost.total_cost())));
+ mat_info->lookup_cost)));
/*
NOTE: When we pick to use SJM[-Scan] we don't memcpy its POSITION
@@ -3158,8 +3217,9 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
*strategy= SJ_OPT_MATERIALIZE;
if (unlikely(trace.trace_started()))
{
- trace.add("records", *record_count);
- trace.add("read_time", *read_time);
+ trace.
+ add("rows", *record_count).
+ add("cost", *read_time);
}
return TRUE;
}
@@ -3193,9 +3253,9 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
/* Add materialization cost */
prefix_cost=
COST_ADD(prefix_cost,
- COST_ADD(mat_info->materialization_cost.total_cost(),
+ COST_ADD(mat_info->materialization_cost,
COST_MULT(prefix_rec_count,
- mat_info->scan_cost.total_cost())));
+ mat_info->scan_cost)));
prefix_rec_count= COST_MULT(prefix_rec_count, mat_info->rows);
uint i;
@@ -3212,10 +3272,8 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
best_access_path(join, join->positions[i].table, rem_tables,
join->positions, i,
disable_jbuf, prefix_rec_count, &curpos, &dummy);
- prefix_rec_count= COST_MULT(prefix_rec_count, curpos.records_read);
+ prefix_rec_count= COST_MULT(prefix_rec_count, curpos.records_out);
prefix_cost= COST_ADD(prefix_cost, curpos.read_time);
- prefix_cost= COST_ADD(prefix_cost,
- prefix_rec_count / TIME_FOR_COMPARE);
//TODO: take into account join condition selectivity here
}
@@ -3240,8 +3298,9 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
*handled_fanout= mat_nest->sj_inner_tables;
if (unlikely(trace.trace_started()))
{
- trace.add("records", *record_count);
- trace.add("read_time", *read_time);
+ trace.
+ add("rows", *record_count).
+ add("cost", *read_time);
}
return TRUE;
}
@@ -3340,8 +3399,9 @@ bool LooseScan_picker::check_qep(JOIN *join,
*handled_fanout= first->table->emb_sj_nest->sj_inner_tables;
if (unlikely(trace.trace_started()))
{
- trace.add("records", *record_count);
- trace.add("read_time", *read_time);
+ trace.
+ add("rows", *record_count).
+ add("cost", *read_time);
}
return TRUE;
}
@@ -3432,13 +3492,18 @@ bool Firstmatch_picker::check_qep(JOIN *join,
optimizer_flag(join->thd, OPTIMIZER_SWITCH_SEMIJOIN_WITH_CACHE))
{
/*
- An important special case: only one inner table, and @@optimizer_switch
- allows join buffering.
+ An important special case: only one inner table, and
+ @@optimizer_switch allows join buffering.
- read_time is the same (i.e. FirstMatch doesn't add any cost
- - remove fanout added by the last table
+ - remove fanout added by the last table)
*/
if (*record_count)
- *record_count /= join->positions[idx].records_read;
+ *record_count /= join->positions[idx].records_out;
+ /*
+ Remember this choice for
+ fix_semijoin_strategies_for_picked_join_order()
+ */
+ join->positions[idx].firstmatch_with_join_buf= 1;
}
else
{
@@ -3458,8 +3523,9 @@ bool Firstmatch_picker::check_qep(JOIN *join,
*strategy= SJ_OPT_FIRST_MATCH;
if (unlikely(trace.trace_started()))
{
- trace.add("records", *record_count);
- trace.add("read_time", *read_time);
+ trace.
+ add("rows", *record_count).
+ add("cost", *read_time);
}
return TRUE;
}
@@ -3471,6 +3537,56 @@ bool Firstmatch_picker::check_qep(JOIN *join,
}
+/*
+ Duplicate_weedout strategy is described at
+ https://mariadb.com/kb/en/duplicateweedout-strategy/
+
+ The idea is that if one has a subquery of type:
+
+ select *
+ from Country
+ where
+ Country.code IN (select City.Country
+ from City
+ where
+ ...)
+
+ Before semi join optimization it was executed this way:
+ - Scan rows in Country
+ - For each accepted row, execute the sub query with
+ 'Country.code = City.Country' added to the WHERE clause and with
+ LIMIT 1
+
+ With semi join optimization it can be converted to the following semi join.
+
+ select * from Country semi-join City
+ where Country.code = City.Country and ...
+
+ This is executed as:
+
+ - Scan rows in Country
+ - Scan rows in City with 'Country.code = City.Country' added to the
+ subquery WHERE clause. Stop scanning after the first match.
+
+ or
+
+ - Create temporary table to store City.Country (with a unique key)
+ - Scan rows in City (according to plan for City) and put them into the
+ temporary table
+ - Scan the temporary table
+ - Do index lookup in Country table with City.Country
+
+ With Duplicate_weedout we would instead try to do:
+
+ - Create temporary table to hold unique rowids for the Country
+ - Scan rows in City (according to plan for City)
+ - Scan rows in Country (according to plan for Country)
+ - Write Country.id rowid to temporary table. If there was no
+ conflicting row in the temporary table, accept the row combination.
+ - Delete temporary table
+*/
+
+
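The steps described in the comment above can be mimicked outside the server in a handful of lines; std::unordered_set plays the role of the rowid-keyed weedout temporary table, and the Country/City data are invented:

  #include <cstdio>
  #include <string>
  #include <unordered_set>
  #include <vector>

  struct CountryRow { long rowid; std::string code; };
  struct CityRow    { std::string country; };

  int main()
  {
    std::vector<CountryRow> country= { {1, "FIN"}, {2, "SWE"} };
    std::vector<CityRow>    city=    { {"FIN"}, {"FIN"}, {"SWE"} };

    // Plays the role of the weedout temp table with a unique key on rowid.
    std::unordered_set<long> seen_rowids;

    // Join order: City first, then Country (as a semi-join allows).
    for (const CityRow &c : city)
      for (const CountryRow &co : country)
        if (co.code == c.country)
        {
          // "Write" the rowid; a duplicate means we already produced this
          // Country row, so the combination is skipped.
          if (seen_rowids.insert(co.rowid).second)
            std::printf("accept country %s\n", co.code.c_str());
        }
    // Prints FIN and SWE once each, although FIN matches two cities.
  }

A duplicate insert here corresponds to the unique-key write error the strategy relies on in the server.
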
void Duplicate_weedout_picker::set_from_prev(POSITION *prev)
{
if (prev->dups_weedout_picker.is_used)
@@ -3535,46 +3651,42 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
*/
uint first_tab= first_dupsweedout_table;
double dups_cost;
- double prefix_rec_count;
+ double first_weedout_table_rec_count;
double sj_inner_fanout= 1.0;
double sj_outer_fanout= 1.0;
uint temptable_rec_size;
- Json_writer_object trace(join->thd);
- trace.add("strategy", "DuplicateWeedout");
if (first_tab == join->const_tables)
{
- prefix_rec_count= 1.0;
+ first_weedout_table_rec_count= 1.0;
temptable_rec_size= 0;
dups_cost= 0.0;
}
else
{
dups_cost= join->positions[first_tab - 1].prefix_cost;
- prefix_rec_count= join->positions[first_tab - 1].prefix_record_count;
+ first_weedout_table_rec_count=
+ join->positions[first_tab - 1].prefix_record_count;
temptable_rec_size= 8; /* This is not true but we'll make it so */
}
table_map dups_removed_fanout= 0;
- double current_fanout= prefix_rec_count;
for (uint j= first_dupsweedout_table; j <= idx; j++)
{
POSITION *p= join->positions + j;
- current_fanout= COST_MULT(current_fanout, p->records_read);
- dups_cost= COST_ADD(dups_cost,
- COST_ADD(p->read_time,
- current_fanout / TIME_FOR_COMPARE));
+ dups_cost= COST_ADD(dups_cost, p->read_time);
+
if (p->table->emb_sj_nest)
{
- sj_inner_fanout= COST_MULT(sj_inner_fanout, p->records_read);
+ sj_inner_fanout= COST_MULT(sj_inner_fanout, p->records_out);
dups_removed_fanout |= p->table->table->map;
}
else
{
+ sj_outer_fanout= COST_MULT(sj_outer_fanout, p->records_out);
/* Ensure that table supports comparable rowids */
DBUG_ASSERT(!(p->table->table->file->ha_table_flags() & HA_NON_COMPARABLE_ROWID));
- sj_outer_fanout= COST_MULT(sj_outer_fanout, p->records_read);
temptable_rec_size += p->table->table->file->ref_length;
}
}
@@ -3583,32 +3695,38 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
Add the cost of temptable use. The table will have sj_outer_fanout
records, and we will make
- sj_outer_fanout table writes
- - sj_inner_fanout*sj_outer_fanout lookups.
+ - sj_inner_fanout*sj_outer_fanout lookups.
+ There is no row copy cost (as we are only copying the rowid) and no
+ compare cost (as we are only checking if the row exists by
+ checking if we got a write error).
*/
- double one_lookup_cost= get_tmp_table_lookup_cost(join->thd,
- sj_outer_fanout,
- temptable_rec_size);
- double one_write_cost= get_tmp_table_write_cost(join->thd,
- sj_outer_fanout,
- temptable_rec_size);
-
- double write_cost= COST_MULT(join->positions[first_tab].prefix_record_count,
- sj_outer_fanout * one_write_cost);
- double full_lookup_cost=
- COST_MULT(join->positions[first_tab].prefix_record_count,
- COST_MULT(sj_outer_fanout,
- sj_inner_fanout * one_lookup_cost));
- dups_cost= COST_ADD(dups_cost, COST_ADD(write_cost, full_lookup_cost));
+ TMPTABLE_COSTS one_cost= get_tmp_table_costs(join->thd,
+ sj_outer_fanout,
+ temptable_rec_size,
+ 0, 0);
+ double write_cost= (one_cost.create +
+ first_weedout_table_rec_count * sj_outer_fanout * one_cost.write);
+ double full_lookup_cost= (first_weedout_table_rec_count* sj_outer_fanout *
+ sj_inner_fanout * one_cost.lookup);
+ *read_time= dups_cost + write_cost + full_lookup_cost;
- *read_time= dups_cost;
- *record_count= prefix_rec_count * sj_outer_fanout;
+ *record_count= first_weedout_table_rec_count * sj_outer_fanout;
*handled_fanout= dups_removed_fanout;
*strategy= SJ_OPT_DUPS_WEEDOUT;
- if (unlikely(trace.trace_started()))
+ if (unlikely(join->thd->trace_started()))
{
- trace.add("records", *record_count);
- trace.add("read_time", *read_time);
+ Json_writer_object trace(join->thd);
+ trace.
+ add("strategy", "DuplicateWeedout").
+ add("prefix_row_count", first_weedout_table_rec_count).
+ add("tmp_table_rows", sj_outer_fanout).
+ add("sj_inner_fanout", sj_inner_fanout).
+ add("rows", *record_count).
+ add("dups_cost", dups_cost).
+ add("write_cost", write_cost).
+ add("full_lookup_cost", full_lookup_cost).
+ add("total_cost", *read_time);
}
return TRUE;
}
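Plugging invented numbers into the reworked costing above (four prefix rows, 50 outer and 10 inner rows in the weedout range, toy unit costs; none of these values come from a real plan):

  #include <cstdio>

  int main()
  {
    double prefix_rows=      4;      // rows before the weedout range
    double sj_outer_fanout= 50;      // non-semijoin rows in the range
    double sj_inner_fanout= 10;      // semijoin inner rows in the range
    double create_cost=      0.5, write_cost_per_row= 0.002,
           lookup_cost=      0.002;

    // One temp-table create, one write per outer row per prefix row,
    // and inner*outer lookups per prefix row.
    double write_cost= create_cost +
                       prefix_rows * sj_outer_fanout * write_cost_per_row;
    double full_lookup_cost= prefix_rows * sj_outer_fanout *
                             sj_inner_fanout * lookup_cost;
    std::printf("write=%.3f lookup=%.3f\n", write_cost, full_lookup_cost);
  }
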
@@ -3657,33 +3775,37 @@ void JOIN::dbug_verify_sj_inner_tables(uint prefix_size) const
*/
void restore_prev_sj_state(const table_map remaining_tables,
- const JOIN_TAB *tab, uint idx)
+ const JOIN_TAB *tab, uint idx)
{
TABLE_LIST *emb_sj_nest;
- if (tab->emb_sj_nest)
+ if ((emb_sj_nest= tab->emb_sj_nest))
{
- table_map subq_tables= tab->emb_sj_nest->sj_inner_tables;
+ table_map subq_tables= emb_sj_nest->sj_inner_tables;
tab->join->sjm_lookup_tables &= ~subq_tables;
- }
- if (!tab->join->emb_sjm_nest && (emb_sj_nest= tab->emb_sj_nest))
- {
- table_map subq_tables= emb_sj_nest->sj_inner_tables &
- ~tab->join->const_table_map;
- /* If we're removing the last SJ-inner table, remove the sj-nest */
- if ((remaining_tables & subq_tables) == subq_tables)
- {
- // All non-const tables of the SJ nest are in the remaining_tables.
- // we are not in the nest anymore.
- tab->join->cur_sj_inner_tables &= ~emb_sj_nest->sj_inner_tables;
- }
- else
+ if (!tab->join->emb_sjm_nest)
{
- // Semi-join nest has:
- // - a table being removed (not in the prefix)
- // - some tables in the prefix.
- tab->join->cur_sj_inner_tables |= emb_sj_nest->sj_inner_tables;
+ table_map subq_tables= (emb_sj_nest->sj_inner_tables &
+ ~tab->join->const_table_map);
+ /* If we're removing the last SJ-inner table, remove the sj-nest */
+ if ((remaining_tables & subq_tables) == subq_tables)
+ {
+ /*
+ All non-const tables of the SJ nest are in the remaining_tables;
+ we are not in the nest anymore.
+ */
+ tab->join->cur_sj_inner_tables &= ~emb_sj_nest->sj_inner_tables;
+ }
+ else
+ {
+ /*
+ Semi-join nest has:
+ - a table being removed (not in the prefix)
+ - some tables in the prefix.
+ */
+ tab->join->cur_sj_inner_tables |= emb_sj_nest->sj_inner_tables;
+ }
}
}
@@ -3795,6 +3917,8 @@ at_sjmat_pos(const JOIN *join, table_map remaining_tables, const JOIN_TAB *tab,
static void recalculate_prefix_record_count(JOIN *join, uint start, uint end)
{
+ DBUG_ASSERT(start >= join->const_tables);
+
for (uint j= start; j < end ;j++)
{
double prefix_count;
@@ -3802,7 +3926,7 @@ static void recalculate_prefix_record_count(JOIN *join, uint start, uint end)
prefix_count= 1.0;
else
prefix_count= COST_MULT(join->best_positions[j-1].prefix_record_count,
- join->best_positions[j-1].records_read);
+ join->best_positions[j-1].records_out);
join->best_positions[j].prefix_record_count= prefix_count;
}
@@ -3954,7 +4078,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
join->best_positions, i,
FALSE, prefix_rec_count,
join->best_positions + i, &dummy);
- prefix_rec_count *= join->best_positions[i].records_read;
+ prefix_rec_count *= join->best_positions[i].records_out;
rem_tables &= ~join->best_positions[i].table->table->map;
}
}
@@ -3966,7 +4090,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
join->best_positions[first].n_sj_tables= tablenr - first + 1;
POSITION dummy; // For loose scan paths
double record_count= (first== join->const_tables)? 1.0:
- join->best_positions[tablenr - 1].prefix_record_count;
+ join->best_positions[first - 1].prefix_record_count;
table_map rem_tables= remaining_tables;
uint idx;
@@ -3989,14 +4113,15 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
{
trace_one_table.add_table_name(join->best_positions[idx].table);
}
- if (join->best_positions[idx].use_join_buffer)
+ if (join->best_positions[idx].use_join_buffer &&
+ !join->best_positions[idx].firstmatch_with_join_buf)
{
best_access_path(join, join->best_positions[idx].table,
rem_tables, join->best_positions, idx,
TRUE /* no jbuf */,
record_count, join->best_positions + idx, &dummy);
}
- record_count *= join->best_positions[idx].records_read;
+ record_count *= join->best_positions[idx].records_out;
rem_tables &= ~join->best_positions[idx].table->table->map;
}
}
@@ -4007,7 +4132,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
POSITION *first_pos= join->best_positions + first;
POSITION loose_scan_pos; // For loose scan paths
double record_count= (first== join->const_tables)? 1.0:
- join->best_positions[tablenr - 1].prefix_record_count;
+ join->best_positions[first - 1].prefix_record_count;
table_map rem_tables= remaining_tables;
uint idx;
@@ -4048,13 +4173,14 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
*/
if (join->best_positions[idx].key)
{
+ DBUG_ASSERT(join->best_positions[idx].type != JT_RANGE);
delete join->best_positions[idx].table->quick;
join->best_positions[idx].table->quick= NULL;
}
}
}
rem_tables &= ~join->best_positions[idx].table->table->map;
- record_count *= join->best_positions[idx].records_read;
+ record_count *= join->best_positions[idx].records_out;
}
first_pos->sj_strategy= SJ_OPT_LOOSE_SCAN;
first_pos->n_sj_tables= my_count_bits(first_pos->table->emb_sj_nest->sj_inner_tables);
@@ -4778,7 +4904,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
DBUG_PRINT("info",("Creating group key in temporary table"));
share->keys=1;
share->uniques= MY_TEST(using_unique_constraint);
- table->key_info=keyinfo;
+ table->key_info= share->key_info= keyinfo;
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME;
keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= 1;
@@ -5272,7 +5398,8 @@ int setup_semijoin_dups_elimination(JOIN *join, ulonglong options,
Got a table that's not within any semi-join nest. This is a case
like this:
- SELECT * FROM ot1, nt1 WHERE ot1.col IN (SELECT expr FROM it1, it2)
+ SELECT * FROM ot1, nt1 WHERE
+ ot1.col IN (SELECT expr FROM it1, it2)
with a join order of
@@ -5749,10 +5876,10 @@ enum_nested_loop_state join_tab_execution_startup(JOIN_TAB *tab)
((subselect_hash_sj_engine*)in_subs->engine);
if (!hash_sj_engine->is_materialized)
{
- hash_sj_engine->materialize_join->exec();
+ int error= hash_sj_engine->materialize_join->exec();
hash_sj_engine->is_materialized= TRUE;
- if (unlikely(hash_sj_engine->materialize_join->error) ||
+ if (unlikely(error) ||
unlikely(tab->join->thd->is_fatal_error))
DBUG_RETURN(NESTED_LOOP_ERROR);
}
@@ -6532,18 +6659,14 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
IN/ALL/ANY optimizations are not applicable for so called fake select
(this select exists only to filter results of union if it is needed).
*/
- if (select_lex == select_lex->master_unit()->fake_select_lex)
- return 0;
-
- if (is_in_subquery())
- {
- in_subs= unit->item->get_IN_subquery();
- if (in_subs->create_in_to_exists_cond(this))
- return true;
- }
- else
+ if (select_lex == select_lex->master_unit()->fake_select_lex ||
+ likely(!is_in_subquery()))
return false;
+ in_subs= unit->item->get_IN_subquery();
+ if (in_subs->create_in_to_exists_cond(this))
+ return true;
+
/* A strategy must be chosen earlier. */
DBUG_ASSERT(in_subs->has_strategy());
DBUG_ASSERT(in_to_exists_where || in_to_exists_having);
@@ -6574,6 +6697,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
/* The cost of the IN->EXISTS strategy. */
double in_exists_strategy_cost;
double dummy;
+ const char *strategy;
/*
A. Estimate the number of rows of the outer table that will be filtered
@@ -6635,7 +6759,6 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
/* Get the cost of the modified IN-EXISTS plan. */
inner_read_time_2= inner_join->best_read;
-
}
else
{
@@ -6647,39 +6770,58 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
C. Compute execution costs.
*/
/* C.1 Compute the cost of the materialization strategy. */
- //uint rowlen= get_tmp_table_rec_length(unit->first_select()->item_list);
- uint rowlen= get_tmp_table_rec_length(ref_ptrs,
- select_lex->item_list.elements);
- /* The cost of writing one row into the temporary table. */
- double write_cost= get_tmp_table_write_cost(thd, inner_record_count_1,
- rowlen);
- /* The cost of a lookup into the unique index of the materialized table. */
- double lookup_cost= get_tmp_table_lookup_cost(thd, inner_record_count_1,
- rowlen);
+ bool blobs_used;
+ uint rowlen= get_tmp_table_rec_length(ref_ptrs,
+ select_lex->item_list.elements,
+ &blobs_used);
+ /* The cost of using the temp table */
+ TMPTABLE_COSTS cost= get_tmp_table_costs(thd, inner_record_count_1,
+ rowlen, blobs_used, 1);
/*
The cost of executing the subquery and storing its result in an indexed
temporary table.
*/
- double materialization_cost= COST_ADD(inner_read_time_1,
- COST_MULT(write_cost,
- inner_record_count_1));
+ double materialization_cost=
+ COST_ADD(cost.create,
+ COST_ADD(inner_read_time_1,
+ COST_MULT((cost.write + WHERE_COST_THD(thd)),
+ inner_record_count_1)));
- materialize_strategy_cost= COST_ADD(materialization_cost,
- COST_MULT(outer_lookup_keys,
- lookup_cost));
+ materialize_strategy_cost=
+ COST_ADD(materialization_cost,
+ COST_MULT(outer_lookup_keys, cost.lookup));
/* C.2 Compute the cost of the IN=>EXISTS strategy. */
- in_exists_strategy_cost= COST_MULT(outer_lookup_keys, inner_read_time_2);
+ in_exists_strategy_cost=
+ COST_MULT(outer_lookup_keys, inner_read_time_2);
/* C.3 Compare the costs and choose the cheaper strategy. */
if (materialize_strategy_cost >= in_exists_strategy_cost)
+ {
in_subs->set_strategy(SUBS_IN_TO_EXISTS);
+ strategy= "in_to_exists";
+ }
else
+ {
in_subs->set_strategy(SUBS_MATERIALIZATION);
+ strategy= "materialization";
+ }
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_subquery(thd, "subquery_plan");
+ trace_subquery.
+ add("rows", inner_record_count_1).
+ add("materialization_cost", materialize_strategy_cost).
+ add("in_exist_cost", in_exists_strategy_cost).
+ add("choosen", strategy);
+ }
DBUG_PRINT("info",
- ("mat_strategy_cost: %.2f, mat_cost: %.2f, write_cost: %.2f, lookup_cost: %.2f",
- materialize_strategy_cost, materialization_cost, write_cost, lookup_cost));
+ ("mat_strategy_cost: %.2f mat_cost: %.2f write_cost: %.2f "
+ "lookup_cost: %.2f",
+ materialize_strategy_cost, materialization_cost, cost.write,
+ cost.lookup));
DBUG_PRINT("info",
("inx_strategy_cost: %.2f, inner_read_time_2: %.2f",
in_exists_strategy_cost, inner_read_time_2));
@@ -6705,6 +6847,13 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
implementation, fall back to IN-TO-EXISTS.
*/
in_subs->set_strategy(SUBS_IN_TO_EXISTS);
+
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_subquery(thd, "subquery_plan_revert");
+ trace_subquery.add("choosen", "in_to_exists");
+ }
}
if (in_subs->test_strategy(SUBS_MATERIALIZATION))
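
The two strategy costs computed in the hunk above can be checked with a
small standalone sketch of the same arithmetic. All input numbers below
(create/write/lookup costs, row counts and read times) are made-up
placeholders rather than values the optimizer would produce, and the
saturating COST_ADD()/COST_MULT() helpers are replaced by plain operators:

#include <cstdio>

int main()
{
  /* Assumed inputs, for illustration only */
  double create_cost= 0.55;        // cost.create: creating the tmp table
  double write_cost= 0.0012;       // cost.write: writing one row
  double lookup_cost= 0.0008;      // cost.lookup: one lookup in the tmp table
  double where_cost= 3.2e-05;      // WHERE_COST_THD(thd)
  double inner_read_time_1= 2.0;   // one full execution of the subquery
  double inner_read_time_2= 1.5;   // IN->EXISTS plan cost per outer row
  double inner_record_count_1= 1000;
  double outer_lookup_keys= 200;

  /* Same formulas as in JOIN::choose_subquery_plan() above */
  double materialization_cost=
    create_cost + inner_read_time_1 +
    (write_cost + where_cost) * inner_record_count_1;
  double materialize_strategy_cost=
    materialization_cost + outer_lookup_keys * lookup_cost;
  double in_exists_strategy_cost= outer_lookup_keys * inner_read_time_2;

  printf("materialization: %.4f  in_to_exists: %.4f  -> %s\n",
         materialize_strategy_cost, in_exists_strategy_cost,
         materialize_strategy_cost >= in_exists_strategy_cost ?
         "IN_TO_EXISTS" : "MATERIALIZATION");
  return 0;
}
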
diff --git a/sql/opt_subselect.h b/sql/opt_subselect.h
index 7b1b810ee81..c0398fc8539 100644
--- a/sql/opt_subselect.h
+++ b/sql/opt_subselect.h
@@ -226,15 +226,18 @@ public:
if (!(found_part & 1 ) && /* no usable ref access for 1st key part */
s->table->covering_keys.is_set(key))
{
+ double records, read_time;
part1_conds_met= TRUE;
+ handler *file= s->table->file;
DBUG_PRINT("info", ("Can use full index scan for LooseScan"));
/* Calculate the cost of complete loose index scan. */
- double records= rows2double(s->table->file->stats.records);
+ records= rows2double(file->stats.records);
/* The cost is entire index scan cost (divided by 2) */
- double read_time= s->table->file->keyread_time(key, 1,
- (ha_rows) records);
+ read_time= file->cost(file->ha_keyread_and_copy_time(key, 1,
+ (ha_rows) records,
+ 0));
/*
Now find out how many different keys we will get (for now we
@@ -291,12 +294,23 @@ public:
}
}
- void save_to_position(JOIN_TAB *tab, POSITION *pos)
+ void save_to_position(JOIN_TAB *tab, double record_count,
+ double records_out,
+ POSITION *pos)
{
pos->read_time= best_loose_scan_cost;
if (best_loose_scan_cost != DBL_MAX)
{
+ /*
+        Make sure the LooseScan plan doesn't produce more rows than
+        the records_out of the other table access methods.
+ */
+ set_if_smaller(best_loose_scan_records, records_out);
+
+ pos->loops= record_count;
pos->records_read= best_loose_scan_records;
+ pos->records_init= pos->records_read;
+ pos->records_out= best_loose_scan_records;
pos->key= best_loose_scan_start_key;
pos->cond_selectivity= 1.0;
pos->loosescan_picker.loosescan_key= best_loose_scan_key;
@@ -320,7 +334,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
void update_sj_state(JOIN *join, const JOIN_TAB *new_tab,
uint idx, table_map remaining_tables);
void restore_prev_sj_state(const table_map remaining_tables,
- const JOIN_TAB *tab, uint idx);
+ const JOIN_TAB *tab, uint idx);
void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc
index 4bc493940fb..1008690ccaa 100644
--- a/sql/opt_trace.cc
+++ b/sql/opt_trace.cc
@@ -149,7 +149,7 @@ void opt_trace_disable_if_no_security_context_access(THD *thd)
return;
}
Opt_trace_context *const trace= &thd->opt_trace;
- if (!thd->trace_started())
+ if (unlikely(!thd->trace_started()))
{
/*
@@optimizer_trace has "enabled=on" but trace is not started.
@@ -201,7 +201,7 @@ void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread ||
- !thd->trace_started())
+ likely(!thd->trace_started()))
return;
Opt_trace_context *const trace= &thd->opt_trace;
@@ -235,7 +235,7 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread ||
- !thd->trace_started())
+ likely(!thd->trace_started()))
return;
Opt_trace_context *const trace= &thd->opt_trace;
@@ -296,7 +296,7 @@ void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread ||
- !thd->trace_started())
+ likely(!thd->trace_started()))
return;
Opt_trace_context *const trace= &thd->opt_trace;
@@ -562,43 +562,49 @@ void Opt_trace_stmt::set_allowed_mem_size(size_t mem_size)
current_json->set_size_limit(mem_size);
}
-/*
- Prefer this when you are iterating over JOIN_TABs
-*/
-void Json_writer::add_table_name(const JOIN_TAB *tab)
+void get_table_name_for_trace(const JOIN_TAB *tab, String *out)
{
+ char table_name_buffer[64];
+ DBUG_ASSERT(tab != NULL);
DBUG_ASSERT(tab->join->thd->trace_started());
- if (tab != NULL)
+
+ if (tab->table && tab->table->derived_select_number)
{
- char table_name_buffer[SAFE_NAME_LEN];
- if (tab->table && tab->table->derived_select_number)
- {
- /* Derived table name generation */
- size_t len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
- "<derived%u>",
- tab->table->derived_select_number);
- add_str(table_name_buffer, len);
- }
- else if (tab->bush_children)
- {
- JOIN_TAB *ctab= tab->bush_children->start;
- size_t len= my_snprintf(table_name_buffer,
- sizeof(table_name_buffer)-1,
- "<subquery%d>",
- ctab->emb_sj_nest->sj_subq_pred->get_identifier());
- add_str(table_name_buffer, len);
- }
- else
- {
- TABLE_LIST *real_table= tab->table->pos_in_table_list;
- add_str(real_table->alias.str, real_table->alias.length);
- }
+ /* Derived table name generation */
+ size_t len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
+ "<derived%u>",
+ tab->table->derived_select_number);
+ out->copy(table_name_buffer, len, &my_charset_bin);
+ }
+ else if (tab->bush_children)
+ {
+ JOIN_TAB *ctab= tab->bush_children->start;
+ size_t len= my_snprintf(table_name_buffer,
+ sizeof(table_name_buffer)-1,
+ "<subquery%d>",
+ ctab->emb_sj_nest->sj_subq_pred->get_identifier());
+ out->copy(table_name_buffer, len, &my_charset_bin);
}
else
- DBUG_ASSERT(0);
+ {
+ TABLE_LIST *real_table= tab->table->pos_in_table_list;
+ out->set(real_table->alias.str, real_table->alias.length, &my_charset_bin);
+ }
}
+/*
+ Prefer this when you are iterating over JOIN_TABs
+*/
+
+void Json_writer::add_table_name(const JOIN_TAB *tab)
+{
+ String sbuf;
+ get_table_name_for_trace(tab, &sbuf);
+ add_str(sbuf.ptr(), sbuf.length());
+}
+
+
void Json_writer::add_table_name(const TABLE *table)
{
add_str(table->pos_in_table_list->alias.str);
@@ -608,6 +614,7 @@ void Json_writer::add_table_name(const TABLE *table)
void trace_condition(THD * thd, const char *name, const char *transform_type,
Item *item, const char *table_name)
{
+ DBUG_ASSERT(thd->trace_started());
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_cond(thd, transform_type);
trace_cond.add("condition", name);
@@ -623,8 +630,10 @@ void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab)
Json_writer_object table_records(thd);
table_records.add_table_name(tab);
Json_writer_object table_rec(thd, "table_scan");
- table_rec.add("rows", tab->found_records)
- .add("cost", tab->read_time);
+ table_rec.
+ add("rows", tab->found_records).
+ add("read_cost", tab->read_time).
+ add("read_and_compare_cost", tab->cached_scan_and_compare_time);
}
@@ -642,18 +651,26 @@ void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab)
analysis of the various join orders.
*/
-void trace_plan_prefix(JOIN *join, uint idx, table_map join_tables)
+void trace_plan_prefix(Json_writer_object *jsobj, JOIN *join, uint idx,
+ table_map join_tables)
{
- THD *const thd= join->thd;
- DBUG_ASSERT(thd->trace_started());
+ DBUG_ASSERT(join->thd->trace_started());
- Json_writer_array plan_prefix(thd, "plan_prefix");
- for (uint i= 0; i < idx; i++)
+ String prefix_str;
+ prefix_str.length(0);
+ for (uint i= join->const_tables; i < idx; i++)
{
TABLE_LIST *const tr= join->positions[i].table->tab_list;
if (!(tr->map & join_tables))
- plan_prefix.add_table_name(join->positions[i].table);
+ {
+ String str;
+ get_table_name_for_trace(join->positions[i].table, &str);
+ if (prefix_str.length() != 0)
+ prefix_str.append(',');
+ prefix_str.append(str);
+ }
}
+ jsobj->add("plan_prefix", prefix_str.ptr(), prefix_str.length());
}
@@ -680,23 +697,30 @@ void print_final_join_order(JOIN *join)
for (j= join->join_tab,i=0 ; i < join->top_join_tab_count;
i++, j++)
best_order.add_table_name(j);
+ best_order.end();
+
+ /* Write information about the resulting join */
+ join_order.
+ add("rows", join->join_record_count).
+ add("cost", join->best_read);
}
-void print_best_access_for_table(THD *thd, POSITION *pos,
- enum join_type type)
+void print_best_access_for_table(THD *thd, POSITION *pos)
{
DBUG_ASSERT(thd->trace_started());
Json_writer_object obj(thd, "chosen_access_method");
- obj.add("type", type == JT_ALL ? "scan" : join_type_str[type]);
- obj.add("records", pos->records_read);
- obj.add("cost", pos->read_time);
- obj.add("uses_join_buffering", pos->use_join_buffer);
+ obj.
+ add("type", pos->type == JT_ALL ? "scan" : join_type_str[pos->type]).
+ add("rows_read", pos->records_read).
+ add("rows_out", pos->records_out).
+ add("cost", pos->read_time).
+ add("uses_join_buffering", pos->use_join_buffer);
if (pos->range_rowid_filter_info)
{
- uint key_no= pos->range_rowid_filter_info->key_no;
- obj.add("rowid_filter_key",
+ uint key_no= pos->range_rowid_filter_info->get_key_no();
+ obj.add("rowid_filter_index",
pos->table->table->key_info[key_no].name);
}
}
diff --git a/sql/opt_trace.h b/sql/opt_trace.h
index 1ee23a33591..c6b5c4ea338 100644
--- a/sql/opt_trace.h
+++ b/sql/opt_trace.h
@@ -107,10 +107,10 @@ void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
Json_writer_object *trace_object);
void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab);
-void trace_plan_prefix(JOIN *join, uint idx, table_map join_tables);
+void trace_plan_prefix(Json_writer_object *jsobj, JOIN *join, uint idx,
+ table_map join_tables);
void print_final_join_order(JOIN *join);
-void print_best_access_for_table(THD *thd, POSITION *pos,
- enum join_type type);
+void print_best_access_for_table(THD *thd, POSITION *pos);
void trace_condition(THD * thd, const char *name, const char *transform_type,
Item *item, const char *table_name= nullptr);
diff --git a/sql/optimizer_costs.h b/sql/optimizer_costs.h
new file mode 100644
index 00000000000..3b2300b9019
--- /dev/null
+++ b/sql/optimizer_costs.h
@@ -0,0 +1,162 @@
+#ifndef OPTIMIZER_COSTS_INCLUDED
+#define OPTIMIZER_COSTS_INCLUDED
+/*
+ Copyright (c) 2022, MariaDB AB
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; version 2 of
+ the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
+*/
+
+/*
+ This file defines costs structures and cost functions used by the optimizer
+*/
+
+
+/*
+ OPTIMIZER_COSTS stores cost variables for each engine. They are stored
+ in linked_optimizer_costs (pointed to by handlerton) and TABLE_SHARE.
+*/
+
+#define OPTIMIZER_COST_UNDEF -1.0
+struct OPTIMIZER_COSTS
+{
+ double disk_read_cost;
+ double index_block_copy_cost;
+ double key_cmp_cost;
+ double key_copy_cost;
+ double key_lookup_cost;
+ double key_next_find_cost;
+ double disk_read_ratio;
+ double row_copy_cost;
+ double row_lookup_cost;
+ double row_next_find_cost;
+ double rowid_cmp_cost;
+ double rowid_copy_cost;
+ double initialized; // Set if default or connected with handlerton
+};
+
+/* Default optimizer costs */
+extern OPTIMIZER_COSTS default_optimizer_costs;
+/*
+ These are used to avoid taking mutex while creating tmp tables
+ These are created once after the server is started so they are
+ not dynamic.
+*/
+extern OPTIMIZER_COSTS heap_optimizer_costs, tmp_table_optimizer_costs;
+
+/*
+ Interface to the engine cost variables. See optimizer_defaults.h for
+ the default values.
+*/
+
+#define DISK_READ_RATIO costs->disk_read_ratio
+#define KEY_LOOKUP_COST costs->key_lookup_cost
+#define ROW_LOOKUP_COST costs->row_lookup_cost
+#define INDEX_BLOCK_COPY_COST costs->index_block_copy_cost
+#define KEY_COPY_COST costs->key_copy_cost
+#define ROW_COPY_COST costs->row_copy_cost
+#define ROW_COPY_COST_THD(THD) default_optimizer_costs.row_copy_cost
+#define KEY_NEXT_FIND_COST costs->key_next_find_cost
+#define ROW_NEXT_FIND_COST costs->row_next_find_cost
+#define KEY_COMPARE_COST costs->key_cmp_cost
+#define SORT_INDEX_CMP_COST default_optimizer_costs.key_cmp_cost
+#define DISK_READ_COST costs->disk_read_cost
+#define DISK_READ_COST_THD(thd) default_optimizer_costs.disk_read_cost
+
+/* Cost of comparing two rowids. This is set relative to KEY_COMPARE_COST */
+#define ROWID_COMPARE_COST costs->rowid_cmp_cost
+#define ROWID_COMPARE_COST_THD(THD) default_optimizer_costs.rowid_cmp_cost
+
+/* Cost of copying a rowid. This is set relative to KEY_COPY_COST */
+#define ROWID_COPY_COST costs->rowid_copy_cost
+
+/* Engine unrelated costs. Stored in THD so that the user can change them */
+#define WHERE_COST optimizer_where_cost
+#define WHERE_COST_THD(THD) ((THD)->variables.optimizer_where_cost)
+#define TABLE_SCAN_SETUP_COST optimizer_scan_setup_cost
+#define TABLE_SCAN_SETUP_COST_THD(THD) (THD)->variables.optimizer_scan_setup_cost
+#define INDEX_SCAN_SETUP_COST optimizer_scan_setup_cost/2
+/* Cost for doing duplicate removal in test_quick_select */
+#define DUPLICATE_REMOVAL_COST default_optimizer_costs.key_copy_cost
+
+/* The default fill factor of a (b-tree) index block is assumed to be 0.75 */
+#define INDEX_BLOCK_FILL_FACTOR_DIV 3
+#define INDEX_BLOCK_FILL_FACTOR_MUL 4
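
One plausible way to read the two fill-factor constants above is shown in
the trivial calculation below; the 8192 block size stands in for IO_SIZE
and is an assumption here, and the real callers that combine the constants
live in the handler code, not in this diff:

#include <cstdio>

int main()
{
  /* With a 0.75 fill factor, only DIV/MUL of an index block holds key data */
  int block_size= 8192;                 /* IO_SIZE (assumed) */
  int usable= block_size * 3 / 4;       /* _DIV / _MUL = 0.75 */
  printf("usable bytes per 8K index block: %d\n", usable);  /* 6144 */
  return 0;
}
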
+
+/*
+ These constants impact the cost of QSORT and priority queue sorting,
+ scaling the "n * log(n)" operations cost proportionally.
+  These factors are < 1.0 to scale down the sorting cost so that it is
+  comparable to 'read a row' = 1.0 (or 0.55 with default caching).
+  A factor of 0.1 makes the cost of get_pq_sort_cost(10, 10, false) = 0.52
+  (reading 10 rows into a priority queue of 10 elements).
+
+  One consequence of setting this factor too high is that priority_queue will
+  not use addon fields (to solve the sort without having to do an extra
+  re-read of rows) even if the LIMIT is low.
+*/
+#define QSORT_SORT_SLOWNESS_CORRECTION_FACTOR (0.1)
+#define PQ_SORT_SLOWNESS_CORRECTION_FACTOR (0.1)
+
+/*
+ Creating a record from the join cache is faster than getting a row from
+ the engine. JOIN_CACHE_ROW_COPY_COST_FACTOR is the factor used to
+ take this into account. This is multiplied with ROW_COPY_COST.
+*/
+#define JOIN_CACHE_ROW_COPY_COST_FACTOR(thd) 1.0
+
+/*
+  cost1 is better than cost2 only if cost1 + COST_EPS < cost2.
+ The main purpose of this is to ensure we use the first index or plan
+ when there are identical plans. Without COST_EPS some plans in the
+ test suite would vary depending on floating point calculations done
+ in different paths.
+*/
+#define COST_EPS 0.0000001
+
+#define COST_MAX (DBL_MAX * (1.0 - DBL_EPSILON))
+
+static inline double COST_ADD(double c, double d)
+{
+ DBUG_ASSERT(c >= 0);
+ DBUG_ASSERT(d >= 0);
+ return (COST_MAX - (d) > (c) ? (c) + (d) : COST_MAX);
+}
+
+static inline double COST_MULT(double c, double f)
+{
+ DBUG_ASSERT(c >= 0);
+ DBUG_ASSERT(f >= 0);
+ return (COST_MAX / (f) > (c) ? (c) * (f) : COST_MAX);
+}
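
A small self-contained check of the saturating helpers and of the COST_EPS
comparison idiom described above. The defines are copied from this header;
main() and the sample plan costs are only for illustration:

#include <cfloat>
#include <cstdio>

#define COST_EPS 0.0000001
#define COST_MAX (DBL_MAX * (1.0 - DBL_EPSILON))

static double COST_ADD(double c, double d)
{ return COST_MAX - d > c ? c + d : COST_MAX; }

static double COST_MULT(double c, double f)
{ return COST_MAX / f > c ? c * f : COST_MAX; }

int main()
{
  /* Both helpers saturate at COST_MAX instead of overflowing */
  printf("%d\n", COST_ADD(COST_MAX, 1.0) == COST_MAX);    /* 1 */
  printf("%d\n", COST_MULT(COST_MAX, 2.0) == COST_MAX);   /* 1 */

  /* A new plan wins only if it is cheaper by more than COST_EPS */
  double cost1= 1.0000000400, cost2= 1.0000000500;
  printf("%s\n", cost1 + COST_EPS < cost2 ? "switch plan" : "keep first plan");
  return 0;
}
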
+
+OPTIMIZER_COSTS *get_optimizer_costs(const LEX_CSTRING *cache_name);
+OPTIMIZER_COSTS *create_optimizer_costs(const char *name, size_t length);
+OPTIMIZER_COSTS *get_or_create_optimizer_costs(const char *name,
+ size_t length);
+bool create_default_optimizer_costs();
+void copy_tmptable_optimizer_costs();
+void free_all_optimizer_costs();
+struct TABLE;
+
+extern "C"
+{
+ typedef int (*process_optimizer_costs_t) (const LEX_CSTRING *,
+ const OPTIMIZER_COSTS *,
+ TABLE *);
+ bool process_optimizer_costs(process_optimizer_costs_t func, TABLE *param);
+}
+
+
+#endif /* OPTIMIZER_COSTS_INCLUDED */
diff --git a/sql/optimizer_defaults.h b/sql/optimizer_defaults.h
new file mode 100644
index 00000000000..4eaa30757ce
--- /dev/null
+++ b/sql/optimizer_defaults.h
@@ -0,0 +1,190 @@
+#ifndef OPTIMIZER_DEFAULTS_INCLUDED
+#define OPTIMIZER_DEFAULTS_INCLUDED
+/*
+ Copyright (c) 2022, MariaDB AB
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; version 2 of
+ the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
+*/
+
+/*
+  This file contains the cost constants used by the optimizer.
+ All costs should be based on milliseconds (1 cost = 1 ms)
+*/
+
+/* Cost for finding the first key in a key scan */
+#define DEFAULT_KEY_LOOKUP_COST ((double) 0.000435777)
+
+/* Cost of finding a row based on row_ID */
+#define DEFAULT_ROW_LOOKUP_COST ((double) 0.000130839)
+
+/*
+ Cost of finding and copying key and row blocks from the storage
+ engine index cache to an internal cache as part of an index
+  scan. This includes all mutexes that need to be taken to get
+  exclusive access to a page. The number is taken from accessing
+  existing blocks from the Aria page cache.
+ Used in handler::scan_time() and handler::keyread_time()
+*/
+#define DEFAULT_INDEX_BLOCK_COPY_COST ((double) 3.56e-05)
+
+/*
+ Cost of copying a row to 'table->record'.
+ Used by scan_time() and rnd_pos_time() methods.
+
+  If this is too small, then table scans will be preferred over 'ref'
+  as with table scans there are no key reads (KEY_LOOKUP_COST), fewer
+  disk reads but more record copying and row comparisons. If it's
+  too big then MariaDB will use key lookups even when a table scan is
+  better.
+*/
+#define DEFAULT_ROW_COPY_COST ((double) 0.000060866)
+
+/*
+ Cost of copying the key to 'table->record'
+
+ If this is too small, then, for small tables, index scans will be
+  preferred over 'ref' as with index scans there are fewer disk reads.
+*/
+#define DEFAULT_KEY_COPY_COST ((double) 0.000015685)
+
+/*
+  Cost of finding the next index entry and checking its rowid against
+  the filter.
+ This cost is very low as it's done inside the storage engine.
+ Should be smaller than KEY_COPY_COST.
+ */
+#define DEFAULT_KEY_NEXT_FIND_COST ((double) 0.000082347)
+
+/* Cost of finding the next row when scanning a table */
+#define DEFAULT_ROW_NEXT_FIND_COST ((double) 0.000045916)
+
+/**
+ The cost of executing the WHERE clause as part of any row check.
+ Increasing this would force the optimizer to use row combinations
+  that read fewer rows.
+ The default cost comes from recording times from a simple where clause that
+ compares two fields (date and a double) with constants.
+*/
+#define DEFAULT_WHERE_COST ((double) 3.2e-05)
+
+/* The cost of comparing a key when using range access or sorting */
+#define DEFAULT_KEY_COMPARE_COST 0.000011361
+
+/* Rowid compare is usually just a single memcmp of a short string */
+#define DEFAULT_ROWID_COMPARE_COST 0.000002653
+/* Rowid copy is usually just a single memcpy of a short string */
+#define DEFAULT_ROWID_COPY_COST 0.000002653
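
To get a feel for the granularity of these constants, the sketch below adds
them up for a hypothetical 'ref' access fetching 100 rows. The formula is
only an illustration of how the per-operation costs are meant to combine;
it is not the exact expression the server uses (those live in the handler
cost methods):

#include <cstdio>

int main()
{
  /* Values copied from the defines above (costs in milliseconds) */
  double key_lookup= 0.000435777;   /* DEFAULT_KEY_LOOKUP_COST */
  double key_next=   0.000082347;   /* DEFAULT_KEY_NEXT_FIND_COST */
  double key_copy=   0.000015685;   /* DEFAULT_KEY_COPY_COST */
  double row_lookup= 0.000130839;   /* DEFAULT_ROW_LOOKUP_COST */
  double row_copy=   0.000060866;   /* DEFAULT_ROW_COPY_COST */
  double where_cost= 3.2e-05;       /* DEFAULT_WHERE_COST */

  /*
    One initial key lookup, then per matching row: find the next key,
    copy the key, look up the row by rowid, copy the row, run the WHERE.
  */
  double rows= 100;
  double cost= key_lookup +
               rows * (key_next + key_copy + row_lookup + row_copy + where_cost);
  printf("approx 'ref' cost for %.0f rows: %.6f ms\n", rows, cost);
  return 0;
}
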
+
+/*
+  Cost modifiers for the rowid filter. These take into account the overhead
+  of using and calling Rowid_filter_sorted_array::check() from the engine.
+*/
+#define ROWID_FILTER_PER_CHECK_MODIFIER 4 /* times key_copy_cost */
+#define ROWID_FILTER_PER_ELEMENT_MODIFIER 1 /* times rowid_compare_cost */
+
+/*
+  Average disk seek time on a hard disk is 8-10 ms, which is also
+  about the time to read an IO_SIZE (8192) block.
+
+  A medium SSD does about 400MB/second, which gives a read time for an
+  IO_SIZE block of IO_SIZE/400000000 = 0.0000205 sec = 0.02 ms.
+*/
+#define DEFAULT_DISK_READ_COST ((double) IO_SIZE / 400000000.0 * 1000)
+
+/*
+  The following is an old comment for hard disks; please ignore it,
+  unless you like history:
+
+ For sequential hard disk seeks the cost formula is:
+ DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * #blocks_to_skip
+
+ The cost of average seek
+ DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*BLOCKS_IN_AVG_SEEK = 10.
+*/
+
+
+/*
+ The table/index cache_miss/total_cache_request ratio.
+  1.0 means that a searched-for key or row will never be in the cache, while
+  0.0 means it is always in the cache (and we don't have to do any disk reads).
+
+ According to folklore, one should not have to access disk for more
+  than 20% of the cache requests for MariaDB to run very well.
+ However in practice when we read rows or keys in a query, we will often
+ read the same row over and over again. Because of this we set
+ DEFAULT_DISK_READ_RATIO to 0.20/10 = 0.02.
+
+  Increasing DISK_READ_RATIO will make MariaDB prefer key lookups over
+  table scans, as ROW_COPY_COST and the index block copy cost will
+  have a larger impact when more rows are examined.
+
+  We are not yet taking cache usage statistics into account, as the
+  EXPLAIN output and costs for a query would then change between two
+  calls of the same query, which may confuse users (and also make the
+  mtr tests very unpredictable).
+
+ Note that the engine's avg_io_cost() (DEFAULT_DISK_READ_COST by default)
+ is multiplied with this constant!
+*/
+
+#define DEFAULT_DISK_READ_RATIO 0.02
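
Since the engine's avg_io_cost() (DEFAULT_DISK_READ_COST by default) is
multiplied by this ratio, the cache-adjusted cost of touching one block is
tiny. A one-line worked example using the defaults from this header, with
IO_SIZE assumed to be 8192:

#include <cstdio>

int main()
{
  double io_size= 8192;                                 /* IO_SIZE (assumed) */
  double disk_read_cost= io_size / 400000000.0 * 1000;  /* 0.02048 ms */
  double disk_read_ratio= 0.02;                         /* DEFAULT_DISK_READ_RATIO */
  printf("%.5f ms * %.2f = %.7f ms per block\n",
         disk_read_cost, disk_read_ratio, disk_read_cost * disk_read_ratio);
  return 0;
}
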
+
+/*
+  The following costs are mainly to ensure we don't do table and index
+  scans for small tables, like the ones we have in the mtr test suite.
+
+  This is mostly to make the mtr tests use indexes (as the optimizer would
+  if the tables were large). It will also ensure that EXPLAIN shows more
+  key usage for users who are testing queries with small tables at the
+  start of a project.
+  This is probably OK in most cases, as the execution time difference
+  between a table or index scan and a key lookup is small when using
+  small tables. It also helps to fill the index cache, which will help
+  mitigate the speed difference.
+*/
+
+/*
+  Extra cost for full table and index scans. Used to prefer key and range
+  access over index and table scans.
+
+  INDEX_SCAN_SETUP_COST (defined in optimizer_costs.h) is half of
+  table_scan_setup_cost to get the optimizer to prefer index scans to table
+  scans, as key copy is faster than row copy and index blocks provide
+  more information in the cache.
+
+  This will also help MyISAM, as with MyISAM table scans have a cost
+  very close to index scans (they are fast but require a read call
+  that we want to avoid even if it's small).
+
+ 10 usec is about 10 MyISAM row lookups with optimizer_disk_read_ratio= 0.02
+*/
+#define DEFAULT_TABLE_SCAN_SETUP_COST 0.01 // 10 usec
+
+/* Extra cost for doing a range scan. Used to prefer 'ref' over range */
+#define MULTI_RANGE_READ_SETUP_COST KEY_LOOKUP_COST
+
+/*
+  Temporary file and temporary table related costs.
+  Used with subquery materialization, derived tables, etc.
+*/
+
+#define TMPFILE_CREATE_COST 0.5 // Cost of creating and deleting files
+#define HEAP_TEMPTABLE_CREATE_COST 0.025 // ms
+/* Cost taken from HEAP_LOOKUP_COST in ha_heap.cc */
+#define HEAP_TEMPTABLE_LOOKUP_COST (0.00016097)
+#define DISK_TEMPTABLE_LOOKUP_COST(thd) (tmp_table_optimizer_costs.key_lookup_cost + tmp_table_optimizer_costs.row_lookup_cost + tmp_table_optimizer_costs.row_copy_cost)
+#define DISK_TEMPTABLE_CREATE_COST TMPFILE_CREATE_COST*2 // 2 tmp tables
+#define DISK_TEMPTABLE_BLOCK_SIZE IO_SIZE
+
+#endif /* OPTIMIZER_DEFAULTS_INCLUDED */
diff --git a/sql/privilege.h b/sql/privilege.h
index 8e9b9a3748e..a34d3f4a172 100644
--- a/sql/privilege.h
+++ b/sql/privilege.h
@@ -224,21 +224,24 @@ static inline privilege_t& operator|=(privilege_t &a, privilege_t b)
return a= a | b;
}
+/*
+ A combination of all privileges that SUPER used to allow before 10.11.0
+*/
+constexpr privilege_t ALLOWED_BY_SUPER_BEFORE_101100= READ_ONLY_ADMIN_ACL;
/*
- A combination of all SUPER privileges added since the old user table format.
- These privileges are automatically added when upgrading from the
- old format mysql.user table if a user has the SUPER privilege.
+ A combination of all privileges that SUPER used to allow before 11.0.0
*/
-constexpr privilege_t GLOBAL_SUPER_ADDED_SINCE_USER_TABLE_ACLS=
+constexpr privilege_t ALLOWED_BY_SUPER_BEFORE_110000=
SET_USER_ACL |
FEDERATED_ADMIN_ACL |
CONNECTION_ADMIN_ACL |
- READ_ONLY_ADMIN_ACL |
REPL_SLAVE_ADMIN_ACL |
BINLOG_ADMIN_ACL |
- BINLOG_REPLAY_ACL;
-
+ BINLOG_REPLAY_ACL |
+ SLAVE_MONITOR_ACL |
+ BINLOG_MONITOR_ACL |
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t COL_DML_ACLS=
SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL;
@@ -274,12 +277,10 @@ constexpr privilege_t PROC_ACLS=
ALTER_PROC_ACL | EXECUTE_ACL | GRANT_ACL;
constexpr privilege_t GLOBAL_ACLS=
- DB_ACLS | SHOW_DB_ACL |
- CREATE_USER_ACL | CREATE_TABLESPACE_ACL |
+ DB_ACLS | SHOW_DB_ACL | CREATE_USER_ACL | CREATE_TABLESPACE_ACL |
SUPER_ACL | RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL |
- REPL_SLAVE_ACL | BINLOG_MONITOR_ACL |
- GLOBAL_SUPER_ADDED_SINCE_USER_TABLE_ACLS |
- REPL_MASTER_ADMIN_ACL | SLAVE_MONITOR_ACL;
+ REPL_SLAVE_ACL |
+ ALLOWED_BY_SUPER_BEFORE_101100 | ALLOWED_BY_SUPER_BEFORE_110000;
constexpr privilege_t DEFAULT_CREATE_PROC_ACLS=
ALTER_PROC_ACL | EXECUTE_ACL;
@@ -303,7 +304,7 @@ constexpr privilege_t PRIV_LOCK_TABLES= SELECT_ACL | LOCK_TABLES_ACL;
CREATE DEFINER=xxx {TRIGGER|VIEW|FUNCTION|PROCEDURE}
Was SUPER prior to 10.5.2
*/
-constexpr privilege_t PRIV_DEFINER_CLAUSE= SET_USER_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_DEFINER_CLAUSE= SET_USER_ACL;
/*
If a VIEW has a `definer=invoker@host` clause and
the specified definer does not exists, then
@@ -319,7 +320,7 @@ constexpr privilege_t PRIV_DEFINER_CLAUSE= SET_USER_ACL | SUPER_ACL;
Was SUPER prior to 10.5.2
*/
-constexpr privilege_t PRIV_REVEAL_MISSING_DEFINER= SET_USER_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_REVEAL_MISSING_DEFINER= SET_USER_ACL;
/* Actions that require only the SUPER privilege */
constexpr privilege_t PRIV_DES_DECRYPT_ONE_ARG= SUPER_ACL;
@@ -330,119 +331,117 @@ constexpr privilege_t PRIV_SET_RESTRICTED_SESSION_SYSTEM_VARIABLE= SUPER_ACL;
/* The following variables respected only SUPER_ACL prior to 10.5.2 */
constexpr privilege_t PRIV_SET_SYSTEM_VAR_BINLOG_FORMAT=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_VAR_BINLOG_DIRECT_NON_TRANSACTIONAL_UPDATES=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_VAR_BINLOG_ANNOTATE_ROW_EVENTS=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_VAR_BINLOG_ROW_IMAGE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_VAR_SQL_LOG_BIN=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_BINLOG_CACHE_SIZE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_BINLOG_FILE_CACHE_SIZE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_BINLOG_STMT_CACHE_SIZE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_BINLOG_COMMIT_WAIT_COUNT=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_BINLOG_COMMIT_WAIT_USEC=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_BINLOG_ROW_METADATA=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_EXPIRE_LOGS_DAYS=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_LOG_BIN_COMPRESS=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_LOG_BIN_COMPRESS_MIN_LEN=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_LOG_BIN_TRUST_FUNCTION_CREATORS=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MAX_BINLOG_CACHE_SIZE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MAX_BINLOG_STMT_CACHE_SIZE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MAX_BINLOG_SIZE=
- SUPER_ACL | BINLOG_ADMIN_ACL;
+ BINLOG_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SYNC_BINLOG=
- SUPER_ACL | BINLOG_ADMIN_ACL;
-
+ BINLOG_ADMIN_ACL;
/* Privileges related to --read-only */
// Was super prior to 10.5.2
constexpr privilege_t PRIV_IGNORE_READ_ONLY= READ_ONLY_ADMIN_ACL;
// Was super prior to 10.5.2
-constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_READ_ONLY=
- READ_ONLY_ADMIN_ACL;
+constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_READ_ONLY= READ_ONLY_ADMIN_ACL;
/*
Privileges related to connection handling.
*/
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_IGNORE_INIT_CONNECT= CONNECTION_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_IGNORE_INIT_CONNECT= CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_IGNORE_MAX_USER_CONNECTIONS= CONNECTION_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_IGNORE_MAX_USER_CONNECTIONS= CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_IGNORE_MAX_CONNECTIONS= CONNECTION_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_IGNORE_MAX_CONNECTIONS= CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_IGNORE_MAX_PASSWORD_ERRORS= CONNECTION_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_IGNORE_MAX_PASSWORD_ERRORS= CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_KILL_OTHER_USER_PROCESS= CONNECTION_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_KILL_OTHER_USER_PROCESS= CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_CONNECT_TIMEOUT=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_DISCONNECT_ON_EXPIRED_PASSWORD=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_EXTRA_MAX_CONNECTIONS=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_INIT_CONNECT=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MAX_CONNECTIONS=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MAX_CONNECT_ERRORS=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MAX_PASSWORD_ERRORS=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_PROXY_PROTOCOL_NETWORKS=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SECURE_AUTH=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLOW_LAUNCH_TIME=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_THREAD_POOL=
- CONNECTION_ADMIN_ACL | SUPER_ACL;
+ CONNECTION_ADMIN_ACL;
/*
@@ -456,16 +455,16 @@ constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_THREAD_POOL=
Was SUPER_ACL | REPL_CLIENT_ACL prior to 10.5.2
REPL_CLIENT_ACL was renamed to BINLOG_MONITOR_ACL.
*/
-constexpr privilege_t PRIV_STMT_SHOW_BINLOG_STATUS= BINLOG_MONITOR_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_SHOW_BINLOG_STATUS= BINLOG_MONITOR_ACL;
/*
Was SUPER_ACL | REPL_CLIENT_ACL prior to 10.5.2
REPL_CLIENT_ACL was renamed to BINLOG_MONITOR_ACL.
*/
-constexpr privilege_t PRIV_STMT_SHOW_BINARY_LOGS= BINLOG_MONITOR_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_SHOW_BINARY_LOGS= BINLOG_MONITOR_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_PURGE_BINLOG= BINLOG_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_PURGE_BINLOG= BINLOG_ADMIN_ACL;
// Was REPL_SLAVE_ACL prior to 10.5.2
constexpr privilege_t PRIV_STMT_SHOW_BINLOG_EVENTS= BINLOG_MONITOR_ACL;
@@ -485,39 +484,39 @@ constexpr privilege_t PRIV_STMT_SHOW_SLAVE_HOSTS= REPL_MASTER_ADMIN_ACL;
Where SUPER prior to 10.5.2
*/
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_MASTER_ENABLED=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_MASTER_TIMEOUT=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_MASTER_WAIT_NO_SLAVE=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_MASTER_TRACE_LEVEL=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_MASTER_WAIT_POINT=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_MASTER_VERIFY_CHECKSUM=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_BINLOG_STATE=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SERVER_ID=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_DOMAIN_ID=
- REPL_MASTER_ADMIN_ACL | SUPER_ACL;
+ REPL_MASTER_ADMIN_ACL;
/* Privileges for statements that are executed on the slave */
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_START_SLAVE= REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_START_SLAVE= REPL_SLAVE_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_STOP_SLAVE= REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_STOP_SLAVE= REPL_SLAVE_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_CHANGE_MASTER= REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_CHANGE_MASTER= REPL_SLAVE_ADMIN_ACL;
// Was (SUPER_ACL | REPL_CLIENT_ACL) prior to 10.5.2
// Was (SUPER_ACL | REPL_SLAVE_ADMIN_ACL) from 10.5.2 to 10.5.7
-constexpr privilege_t PRIV_STMT_SHOW_SLAVE_STATUS= SLAVE_MONITOR_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_SHOW_SLAVE_STATUS= SLAVE_MONITOR_ACL;
// Was REPL_SLAVE_ACL prior to 10.5.2
// Was REPL_SLAVE_ADMIN_ACL from 10.5.2 to 10.5.7
constexpr privilege_t PRIV_STMT_SHOW_RELAYLOG_EVENTS= SLAVE_MONITOR_ACL;
@@ -526,114 +525,114 @@ constexpr privilege_t PRIV_STMT_SHOW_RELAYLOG_EVENTS= SLAVE_MONITOR_ACL;
Privileges related to binlog replying.
Were SUPER_ACL prior to 10.5.2
*/
-constexpr privilege_t PRIV_STMT_BINLOG= BINLOG_REPLAY_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_BINLOG= BINLOG_REPLAY_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_SESSION_VAR_GTID_SEQ_NO=
- BINLOG_REPLAY_ACL | SUPER_ACL;
+ BINLOG_REPLAY_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_SESSION_VAR_PSEUDO_THREAD_ID=
- BINLOG_REPLAY_ACL | SUPER_ACL;
+ BINLOG_REPLAY_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_SESSION_VAR_SERVER_ID=
- BINLOG_REPLAY_ACL | SUPER_ACL;
+ BINLOG_REPLAY_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_SESSION_VAR_GTID_DOMAIN_ID=
- BINLOG_REPLAY_ACL | SUPER_ACL;
+ BINLOG_REPLAY_ACL;
/*
Privileges for slave related global variables.
Were SUPER prior to 10.5.2.
*/
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_EVENTS_MARKED_FOR_SKIP=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_REWRITE_DB=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_DO_DB=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_DO_TABLE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_IGNORE_DB=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_IGNORE_TABLE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_WILD_DO_TABLE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_REPLICATE_WILD_IGNORE_TABLE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_READ_BINLOG_SPEED_LIMIT=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_COMPRESSED_PROTOCOL=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_DDL_EXEC_MODE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_DOMAIN_PARALLEL_THREADS=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_EXEC_MODE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_MAX_ALLOWED_PACKET=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_MAX_STATEMENT_TIME=
REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_NET_TIMEOUT=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_PARALLEL_MAX_QUEUED=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_PARALLEL_MODE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_PARALLEL_THREADS=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_PARALLEL_WORKERS=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_RUN_TRIGGERS_FOR_RBR=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_SQL_VERIFY_CHECKSUM=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_TRANSACTION_RETRY_INTERVAL=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SLAVE_TYPE_CONVERSIONS=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_INIT_SLAVE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_SLAVE_ENABLED=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_SLAVE_TRACE_LEVEL=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_SLAVE_DELAY_MASTER=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RPL_SEMI_SYNC_SLAVE_KILL_CONN_TIMEOUT=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RELAY_LOG_PURGE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_RELAY_LOG_RECOVERY=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SYNC_MASTER_INFO=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SYNC_RELAY_LOG=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_SYNC_RELAY_LOG_INFO=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_CLEANUP_BATCH_SIZE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_IGNORE_DUPLICATES=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_POS_AUTO_ENGINES=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_SLAVE_POS=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
constexpr privilege_t PRIV_SET_SYSTEM_GLOBAL_VAR_GTID_STRICT_MODE=
- REPL_SLAVE_ADMIN_ACL | SUPER_ACL;
+ REPL_SLAVE_ADMIN_ACL;
/* Privileges for federated database related statements */
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_CREATE_SERVER= FEDERATED_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_CREATE_SERVER= FEDERATED_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_ALTER_SERVER= FEDERATED_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_ALTER_SERVER= FEDERATED_ADMIN_ACL;
// Was SUPER_ACL prior to 10.5.2
-constexpr privilege_t PRIV_STMT_DROP_SERVER= FEDERATED_ADMIN_ACL | SUPER_ACL;
+constexpr privilege_t PRIV_STMT_DROP_SERVER= FEDERATED_ADMIN_ACL;
/* Privileges related to processes */
diff --git a/sql/records.cc b/sql/records.cc
index 3aad36ca862..5b2ebefe14f 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -400,11 +400,8 @@ static int rr_handle_error(READ_RECORD *info, int error)
static int rr_quick(READ_RECORD *info)
{
int tmp;
- while ((tmp= info->select->quick->get_next()))
- {
+ if ((tmp= info->select->quick->get_next()))
tmp= rr_handle_error(info, tmp);
- break;
- }
return tmp;
}
@@ -427,16 +424,14 @@ static int rr_index_first(READ_RECORD *info)
int tmp;
// tell handler that we are doing an index scan
if ((tmp = info->table->file->prepare_index_scan()))
- {
- tmp= rr_handle_error(info, tmp);
- return tmp;
- }
+ goto err;
- tmp= info->table->file->ha_index_first(info->record());
info->read_record_func= rr_index;
- if (tmp)
- tmp= rr_handle_error(info, tmp);
- return tmp;
+ if (!(tmp= info->table->file->ha_index_first(info->record())))
+ return tmp;
+
+err:
+ return rr_handle_error(info, tmp);
}
@@ -455,9 +450,9 @@ static int rr_index_first(READ_RECORD *info)
static int rr_index_last(READ_RECORD *info)
{
- int tmp= info->table->file->ha_index_last(info->record());
+ int tmp;
info->read_record_func= rr_index_desc;
- if (tmp)
+ if ((tmp= info->table->file->ha_index_last(info->record())))
tmp= rr_handle_error(info, tmp);
return tmp;
}
diff --git a/sql/rowid_filter.cc b/sql/rowid_filter.cc
index 0589b587ba2..47846fa82fd 100644
--- a/sql/rowid_filter.cc
+++ b/sql/rowid_filter.cc
@@ -19,17 +19,21 @@
#include "sql_class.h"
#include "opt_range.h"
#include "rowid_filter.h"
+#include "optimizer_defaults.h"
#include "sql_select.h"
#include "opt_trace.h"
+/*
+ key_next_find_cost below is the cost of finding the next possible key
+ and calling handler_rowid_filter_check() to check it against the filter
+*/
-inline
-double Range_rowid_filter_cost_info::lookup_cost(
- Rowid_filter_container_type cont_type)
+double Range_rowid_filter_cost_info::
+lookup_cost(Rowid_filter_container_type cont_type)
{
switch (cont_type) {
case SORTED_ARRAY_CONTAINER:
- return log(est_elements)*0.01;
+ return log2(est_elements) * rowid_compare_cost + base_lookup_cost;
default:
DBUG_ASSERT(0);
return 0;
@@ -39,14 +43,16 @@ double Range_rowid_filter_cost_info::lookup_cost(
/**
@brief
- The average gain in cost per row to use the range filter with this cost info
+ The average gain in cost per row to use the range filter with this cost
+ info
*/
inline
-double Range_rowid_filter_cost_info::avg_access_and_eval_gain_per_row(
- Rowid_filter_container_type cont_type)
+double Range_rowid_filter_cost_info::
+avg_access_and_eval_gain_per_row(Rowid_filter_container_type cont_type,
+ double cost_of_row_fetch)
{
- return (1+1.0/TIME_FOR_COMPARE) * (1 - selectivity) -
+ return (cost_of_row_fetch + where_cost) * (1 - selectivity) -
lookup_cost(cont_type);
}
@@ -58,8 +64,9 @@ double Range_rowid_filter_cost_info::avg_access_and_eval_gain_per_row(
@param access_cost_factor the adjusted cost of access a row
@details
- The current code to estimate the cost of a ref access is quite inconsistent:
- in some cases the effect of page buffers is taken into account, for others
+ The current code to estimate the cost of a ref access is quite
+ inconsistent:
+ In some cases the effect of page buffers is taken into account, for others
just the engine dependent read_time() is employed. That's why the average
cost of one random seek might differ from 1.
The parameter access_cost_factor can be considered as the cost of a random
@@ -74,10 +81,11 @@ double Range_rowid_filter_cost_info::avg_access_and_eval_gain_per_row(
*/
inline
-double Range_rowid_filter_cost_info::avg_adjusted_gain_per_row(
- double access_cost_factor)
+double Range_rowid_filter_cost_info::
+avg_adjusted_gain_per_row(double access_cost_factor)
{
- return a - (1 - access_cost_factor) * (1 - selectivity);
+ DBUG_ASSERT(access_cost_factor >= 0.0 && access_cost_factor <= 1.0);
+ return gain - (1 - access_cost_factor) * (1 - selectivity);
}
@@ -91,10 +99,11 @@ double Range_rowid_filter_cost_info::avg_adjusted_gain_per_row(
*/
inline void
-Range_rowid_filter_cost_info::set_adjusted_gain_param(double access_cost_factor)
+Range_rowid_filter_cost_info::
+set_adjusted_gain_param(double access_cost_factor)
{
- a_adj= avg_adjusted_gain_per_row(access_cost_factor);
- cross_x_adj= b / a_adj;
+ gain_adj= avg_adjusted_gain_per_row(access_cost_factor);
+ cross_x_adj= cost_of_building_range_filter / gain_adj;
}
@@ -116,13 +125,20 @@ void Range_rowid_filter_cost_info::init(Rowid_filter_container_type cont_type,
table= tab;
key_no= idx;
est_elements= (ulonglong) table->opt_range[key_no].rows;
- b= build_cost(container_type);
+ cost_of_building_range_filter= build_cost(container_type);
+
+ where_cost= tab->in_use->variables.optimizer_where_cost;
+ base_lookup_cost= (ROWID_FILTER_PER_CHECK_MODIFIER *
+ tab->file->KEY_COPY_COST);
+ rowid_compare_cost= (ROWID_FILTER_PER_ELEMENT_MODIFIER *
+ tab->file->ROWID_COMPARE_COST);
selectivity= est_elements/((double) table->stat_records());
- a= avg_access_and_eval_gain_per_row(container_type);
- if (a > 0)
- cross_x= b/a;
+ gain= avg_access_and_eval_gain_per_row(container_type,
+ tab->file->ROW_LOOKUP_COST);
+ if (gain > 0)
+ cross_x= cost_of_building_range_filter/gain;
else
- cross_x= b+1;
+ cross_x= cost_of_building_range_filter+1;
abs_independent.clear_all();
}
@@ -135,16 +151,19 @@ void Range_rowid_filter_cost_info::init(Rowid_filter_container_type cont_type,
double
Range_rowid_filter_cost_info::build_cost(Rowid_filter_container_type cont_type)
{
- double cost= 0;
+ double cost;
+ OPTIMIZER_COSTS *costs= &table->s->optimizer_costs;
DBUG_ASSERT(table->opt_range_keys.is_set(key_no));
- cost+= table->opt_range[key_no].index_only_cost;
+ /* Cost of fetching keys */
+ cost= table->opt_range[key_no].index_only_fetch_cost(table);
switch (cont_type) {
-
case SORTED_ARRAY_CONTAINER:
- cost+= ARRAY_WRITE_COST * est_elements; /* cost filling the container */
- cost+= ARRAY_SORT_C * est_elements * log(est_elements); /* sorting cost */
+ /* Add cost of filling container and cost of sorting */
+ cost+= (est_elements *
+ (costs->rowid_copy_cost + // Copying rowid
+ costs->rowid_cmp_cost * log2(est_elements))); // Sort
break;
default:
DBUG_ASSERT(0);
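
The rewritten lookup_cost(), build_cost() and init() above reduce to a
per-row gain and a break-even row count (cross_x). A standalone sketch of
that arithmetic; the table statistics and the index-only fetch cost are
assumptions, and the cost constants are the defaults from
optimizer_defaults.h:

#include <cmath>
#include <cstdio>

int main()
{
  /* Assumed statistics */
  double est_elements= 1000;       /* rows selected by the filter's range */
  double table_rows= 100000;       /* table->stat_records() */
  double index_only_fetch= 0.8;    /* index_only_fetch_cost(), ms (assumed) */

  /* Default cost constants (ms) */
  double row_lookup_cost= 0.000130839;
  double where_cost= 3.2e-05;
  double key_copy_cost= 0.000015685;
  double rowid_cmp_cost= 0.000002653;
  double rowid_copy_cost= 0.000002653;

  double selectivity= est_elements / table_rows;
  double base_lookup_cost= 4 * key_copy_cost;  /* ROWID_FILTER_PER_CHECK_MODIFIER */
  double lookup_cost= std::log2(est_elements) * rowid_cmp_cost + base_lookup_cost;

  /* Gain per examined row and filter build cost, as in the hunks above */
  double gain= (row_lookup_cost + where_cost) * (1 - selectivity) - lookup_cost;
  double build_cost= index_only_fetch +
                     est_elements * (rowid_copy_cost +
                                     rowid_cmp_cost * std::log2(est_elements));
  double cross_x= gain > 0 ? build_cost / gain : build_cost + 1;

  printf("gain/row= %.6f ms  build= %.3f ms  break-even rows= %.0f\n",
         gain, build_cost, cross_x);
  return 0;
}
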
@@ -177,7 +196,7 @@ int compare_range_rowid_filter_cost_info_by_a(
Range_rowid_filter_cost_info **filter_ptr_1,
Range_rowid_filter_cost_info **filter_ptr_2)
{
- double diff= (*filter_ptr_2)->get_a() - (*filter_ptr_1)->get_a();
+ double diff= (*filter_ptr_2)->get_gain() - (*filter_ptr_1)->get_gain();
return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
}
@@ -204,7 +223,8 @@ void TABLE::prune_range_rowid_filters()
the elements if this bit matrix.
*/
- Range_rowid_filter_cost_info **filter_ptr_1= range_rowid_filter_cost_info_ptr;
+ Range_rowid_filter_cost_info **filter_ptr_1=
+ range_rowid_filter_cost_info_ptr;
for (uint i= 0;
i < range_rowid_filter_cost_info_elems;
i++, filter_ptr_1++)
@@ -243,7 +263,7 @@ void TABLE::prune_range_rowid_filters()
*/
Range_rowid_filter_cost_info **cand_filter_ptr=
- range_rowid_filter_cost_info_ptr;
+ range_rowid_filter_cost_info_ptr;
for (uint i= 0;
i < range_rowid_filter_cost_info_elems;
i++, cand_filter_ptr++)
@@ -361,9 +381,7 @@ void TABLE::init_cost_info_for_usable_range_rowid_filters(THD *thd)
*/
while ((key_no= it++) != key_map::Iterator::BITMAP_END)
{
- if (!(file->index_flags(key_no, 0, 1) & HA_DO_RANGE_FILTER_PUSHDOWN)) // !1
- continue;
- if (file->is_clustering_key(key_no)) // !2
+ if (!can_use_rowid_filter(key_no)) // 1 & 2
continue;
if (opt_range[key_no].rows >
get_max_range_rowid_filter_elems_for_table(thd, this,
@@ -418,6 +436,7 @@ void TABLE::init_cost_info_for_usable_range_rowid_filters(THD *thd)
void TABLE::trace_range_rowid_filters(THD *thd) const
{
+ DBUG_ASSERT(thd->trace_started());
if (!range_rowid_filter_cost_info_elems)
return;
@@ -435,45 +454,55 @@ void TABLE::trace_range_rowid_filters(THD *thd) const
void Range_rowid_filter_cost_info::trace_info(THD *thd)
{
+ DBUG_ASSERT(thd->trace_started());
Json_writer_object js_obj(thd);
- js_obj.add("key", table->key_info[key_no].name);
- js_obj.add("build_cost", b);
- js_obj.add("rows", est_elements);
+ js_obj.
+ add("key", table->key_info[key_no].name).
+ add("build_cost", cost_of_building_range_filter).
+ add("rows", est_elements);
}
/**
@brief
Choose the best range filter for the given access of the table
- @param access_key_no The index by which the table is accessed
- @param records The estimated total number of key tuples with this access
- @param access_cost_factor the cost of a random seek to access the table
-
+ @param access_key_no The index by which the table is accessed
+ @param records The estimated total number of key tuples with
+ this access
+  @param fetch_cost       The cost of fetching 'records' rows
+ @param index_only_cost The cost of fetching 'records' rows with
+ index only reads
+ @param prev_records How many index_read_calls() we expect to make
+  @param records_out       Will be updated to the minimum result rows for any
+ usable filter.
@details
The function looks through the array of cost info for range filters
  and chooses the element for the range filter that promises the greatest
  gain with the ref or range access of the table by access_key_no.
- As the array is sorted by cross_x in ascending order the function stops
- the look through as soon as it reaches the first element with
- cross_x_adj > records because the range filter for this element and the
- range filters for all remaining elements do not promise positive gains.
- @note
- It is easy to see that if cross_x[i] > cross_x[j] then
- cross_x_adj[i] > cross_x_adj[j]
+  The function assumes that the caller has checked that the key is not a clustered
+ key. See best_access_path().
@retval Pointer to the cost info for the range filter that promises
the greatest gain, NULL if there is no such range filter
*/
Range_rowid_filter_cost_info *
-TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
- double records,
- double access_cost_factor)
+TABLE::best_range_rowid_filter(uint access_key_no, double records,
+ double fetch_cost, double index_only_cost,
+ double prev_records, double *records_out)
{
if (range_rowid_filter_cost_info_elems == 0 ||
covering_keys.is_set(access_key_no))
return 0;
+ /*
+ Currently we do not support usage of range filters if the table
+ is accessed by the clustered primary key. It does not make sense
+ if a full key is used. If the table is accessed by a partial
+ clustered primary key it would, but the current InnoDB code does not
+ allow it. Later this limitation may be lifted.
+ */
+ DBUG_ASSERT(!file->is_clustering_key(access_key_no));
// Disallow use of range filter if the key contains partially-covered
// columns.
@@ -483,46 +512,38 @@ TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
return 0;
}
- /*
- Currently we do not support usage of range filters if the table
- is accessed by the clustered primary key. It does not make sense
- if a full key is used. If the table is accessed by a partial
- clustered primary key it would, but the current InnoDB code does not
- allow it. Later this limitation will be lifted
- */
- if (file->is_clustering_key(access_key_no))
- return 0;
-
Range_rowid_filter_cost_info *best_filter= 0;
- double best_filter_gain= 0;
+ double best_filter_gain= DBL_MAX;
key_map no_filter_usage= key_info[access_key_no].overlapped;
no_filter_usage.merge(key_info[access_key_no].constraint_correlated);
+ no_filter_usage.set_bit(access_key_no);
for (uint i= 0; i < range_rowid_filter_cost_info_elems ; i++)
{
- double curr_gain = 0;
+ double new_cost, new_total_cost, new_records;
+ double cost_of_accepted_rows, cost_of_rejected_rows;
Range_rowid_filter_cost_info *filter= range_rowid_filter_cost_info_ptr[i];
/*
Do not use a range filter that uses an in index correlated with
the index by which the table is accessed
*/
- if ((filter->key_no == access_key_no) ||
- no_filter_usage.is_set(filter->key_no))
+ if (no_filter_usage.is_set(filter->key_no))
continue;
- filter->set_adjusted_gain_param(access_cost_factor);
-
- if (records < filter->cross_x_adj)
- {
- /* Does not make sense to look through the remaining filters */
- break;
- }
-
- curr_gain= filter->get_adjusted_gain(records);
- if (best_filter_gain < curr_gain)
+ new_records= records * filter->selectivity;
+ set_if_smaller(*records_out, new_records);
+ cost_of_accepted_rows= fetch_cost * filter->selectivity;
+ cost_of_rejected_rows= index_only_cost * (1 - filter->selectivity);
+ new_cost= (cost_of_accepted_rows + cost_of_rejected_rows +
+ records * filter->lookup_cost());
+ new_total_cost= ((new_cost + new_records *
+ in_use->variables.optimizer_where_cost) *
+ prev_records + filter->get_setup_cost());
+
+ if (best_filter_gain > new_total_cost)
{
- best_filter_gain= curr_gain;
+ best_filter_gain= new_total_cost;
best_filter= filter;
}
}
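A hedged recap of the quantity the loop above minimizes. The parameter names mirror the locals in the patch, but the helper itself is only an illustration of the formula, not server code:

/*
  Assumed stand-ins: 'records' key tuples are fetched per ref/range access,
  the access is repeated 'prev_records' times, and the filter accepts a
  fraction 'selectivity' of the rows.
*/
static double filter_total_cost(double records, double selectivity,
                                double fetch_cost, double index_only_cost,
                                double lookup_cost, double where_cost,
                                double prev_records, double setup_cost)
{
  double new_records= records * selectivity;
  double cost_of_accepted_rows= fetch_cost * selectivity;
  double cost_of_rejected_rows= index_only_cost * (1 - selectivity);
  double new_cost= (cost_of_accepted_rows + cost_of_rejected_rows +
                    records * lookup_cost);
  /* Filter setup is paid once; everything else scales with prev_records. */
  return ((new_cost + new_records * where_cost) * prev_records + setup_cost);
}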
@@ -570,41 +591,40 @@ bool Range_rowid_filter::fill()
file->pushed_idx_cond_keyno= MAX_KEY;
file->in_range_check_pushed_down= false;
- /* We're going to just read rowids / primary keys */
+ /* We're going to just read rowids / clustered primary keys */
table->prepare_for_position();
- table->file->ha_start_keyread(quick->index);
+ file->ha_start_keyread(quick->index);
if (quick->init() || quick->reset())
- rc= 1;
+ goto end;
- while (!rc)
+ while (!(rc= quick->get_next()))
{
- rc= quick->get_next();
- if (thd->killed)
- rc= 1;
- if (!rc)
+ file->position(quick->record);
+ if (container->add(NULL, (char*) file->ref) || thd->killed)
{
- file->position(quick->record);
- if (container->add(NULL, (char*) file->ref))
- rc= 1;
- else
- tracker->increment_container_elements_count();
+ rc= 1;
+ break;
}
}
+end:
quick->range_end();
- table->file->ha_end_keyread();
+ file->ha_end_keyread();
table->status= table_status_save;
file->pushed_idx_cond= pushed_idx_cond_save;
file->pushed_idx_cond_keyno= pushed_idx_cond_keyno_save;
file->in_range_check_pushed_down= in_range_check_pushed_down_save;
- tracker->report_container_buff_size(table->file->ref_length);
+
+ tracker->set_container_elements_count(container->elements());
+ tracker->report_container_buff_size(file->ref_length);
if (rc != HA_ERR_END_OF_FILE)
return 1;
- table->file->rowid_filter_is_active= true;
+ container->sort(refpos_order_cmp, (void *) file);
+ file->rowid_filter_is_active= container->elements() != 0;
return 0;
}
@@ -628,18 +648,13 @@ bool Range_rowid_filter::fill()
bool Rowid_filter_sorted_array::check(void *ctxt, char *elem)
{
- TABLE *table= (TABLE *) ctxt;
- if (!is_checked)
- {
- refpos_container.sort(refpos_order_cmp, (void *) (table->file));
- is_checked= true;
- }
+ handler *file= ((TABLE *) ctxt)->file;
int l= 0;
int r= refpos_container.elements()-1;
while (l <= r)
{
int m= (l + r) / 2;
- int cmp= refpos_order_cmp((void *) (table->file),
+ int cmp= refpos_order_cmp((void *) file,
refpos_container.get_pos(m), elem);
if (cmp == 0)
return true;
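Because the container is now sorted once when the filter is filled, check() can fall straight through to a binary search. A self-contained toy model of that behaviour, with std::string standing in for the fixed-size handler rowids the server actually uses:

#include <algorithm>
#include <string>
#include <vector>

/* Toy model only; the server operates on fixed-size handler::ref buffers. */
struct ToyRowidFilter
{
  std::vector<std::string> rowids;

  void add(std::string rowid) { rowids.push_back(std::move(rowid)); }

  /* Done once, at fill() time, instead of lazily on the first check(). */
  void sort_after_fill() { std::sort(rowids.begin(), rowids.end()); }

  bool check(const std::string &rowid) const
  { return std::binary_search(rowids.begin(), rowids.end(), rowid); }
};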
@@ -656,14 +671,6 @@ Range_rowid_filter::~Range_rowid_filter()
{
delete container;
container= 0;
- if (select)
- {
- if (select->quick)
- {
- delete select->quick;
- select->quick= 0;
- }
- delete select;
- select= 0;
- }
+ delete select;
+ select= 0;
}
diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h
index 02962f3e677..8f3f3a10925 100644
--- a/sql/rowid_filter.h
+++ b/sql/rowid_filter.h
@@ -143,13 +143,6 @@ class SQL_SELECT;
class Rowid_filter_container;
class Range_rowid_filter_cost_info;
-/* Cost to write rowid into array */
-#define ARRAY_WRITE_COST 0.005
-/* Factor used to calculate cost of sorting rowids in array */
-#define ARRAY_SORT_C 0.01
-/* Cost to evaluate condition */
-#define COST_COND_EVAL 0.2
-
typedef enum
{
SORTED_ARRAY_CONTAINER,
@@ -193,7 +186,10 @@ public:
virtual bool check(void *ctxt, char *elem) = 0;
/* True if the container does not contain any element */
- virtual bool is_empty() = 0;
+ bool is_empty() { return elements() == 0; }
+ virtual uint elements() = 0;
+ virtual void sort (int (*cmp) (void *ctxt, const void *el1, const void *el2),
+ void *cmp_arg) = 0;
virtual ~Rowid_filter_container() = default;
};
@@ -269,9 +265,9 @@ public:
~Range_rowid_filter();
- bool build() { return fill(); }
+ bool build() override { return fill(); }
- bool check(char *elem)
+ bool check(char *elem) override
{
if (container->is_empty())
return false;
@@ -303,52 +299,49 @@ class Refpos_container_sorted_array : public Sql_alloc
/* Number of bytes allocated for an element */
uint elem_size;
/* The dynamic array over which the wrapper is built */
- Dynamic_array<char> *array;
+ DYNAMIC_ARRAY array;
+ DYNAMIC_ARRAY_APPEND append;
public:
Refpos_container_sorted_array(uint max_elems, uint elem_sz)
- : max_elements(max_elems), elem_size(elem_sz), array(0) {}
+ :max_elements(max_elems), elem_size(elem_sz)
+ {
+ bzero(&array, sizeof(array));
+ }
~Refpos_container_sorted_array()
{
- delete array;
- array= 0;
+ delete_dynamic(&array);
}
bool alloc()
{
- array= new Dynamic_array<char> (PSI_INSTRUMENT_MEM,
- elem_size * max_elements,
- elem_size * max_elements/sizeof(char) + 1);
- return array == NULL;
+ /* This can never fail as things will be allocated on demand */
+ init_dynamic_array2(PSI_INSTRUMENT_MEM, &array, elem_size, 0,
+ max_elements, 512, MYF(0));
+ init_append_dynamic(&append, &array);
+ return 0;
}
- bool add(char *elem)
+ bool add(const char *elem)
{
- for (uint i= 0; i < elem_size; i++)
- {
- if (array->append(elem[i]))
- return true;
- }
- return false;
+ return append_dynamic(&append, elem);
}
- char *get_pos(uint n)
+ inline uchar *get_pos(uint n) const
{
- return array->get_pos(n * elem_size);
+ return dynamic_array_ptr(&array, n);
}
- uint elements() { return (uint) (array->elements() / elem_size); }
+ inline uint elements() const { return (uint) array.elements; }
void sort (int (*cmp) (void *ctxt, const void *el1, const void *el2),
void *cmp_arg)
{
- my_qsort2(array->front(), array->elements()/elem_size,
+ my_qsort2(array.buffer, array.elements,
elem_size, (qsort2_cmp) cmp, cmp_arg);
}
-
- bool is_empty() { return elements() == 0; }
};
@@ -363,23 +356,29 @@ class Rowid_filter_sorted_array: public Rowid_filter_container
{
/* The dynamic array to store rowids / primary keys */
Refpos_container_sorted_array refpos_container;
- /* Initially false, becomes true after the first call of (check() */
- bool is_checked;
public:
Rowid_filter_sorted_array(uint elems, uint elem_size)
- : refpos_container(elems, elem_size), is_checked(false) {}
+ : refpos_container(elems, elem_size) {}
- Rowid_filter_container_type get_type()
+ Rowid_filter_container_type get_type() override
{ return SORTED_ARRAY_CONTAINER; }
- bool alloc() { return refpos_container.alloc(); }
+ bool alloc() override { return refpos_container.alloc(); }
- bool add(void *ctxt, char *elem) { return refpos_container.add(elem); }
+ bool add(void *ctxt, char *elem) override
+ { return refpos_container.add(elem); }
- bool check(void *ctxt, char *elem);
+ bool check(void *ctxt, char *elem) override;
+
+ uint elements() override { return refpos_container.elements(); }
+
+ void sort (int (*cmp) (void *ctxt, const void *el1, const void *el2),
+ void *cmp_arg) override
+ {
+ return refpos_container.sort(cmp, cmp_arg);
+ }
- bool is_empty() { return refpos_container.is_empty(); }
};
/**
@@ -390,20 +389,24 @@ public:
whether usage of the range filter promises some gain.
*/
-class Range_rowid_filter_cost_info : public Sql_alloc
+class Range_rowid_filter_cost_info final: public Sql_alloc
{
/* The table for which the range filter is to be built (if needed) */
TABLE *table;
/* Estimated number of elements in the filter */
ulonglong est_elements;
- /* The cost of building the range filter */
- double b;
+ /* The index whose range scan would be used to build the range filter */
+ uint key_no;
+ double cost_of_building_range_filter;
+ double where_cost, base_lookup_cost, rowid_compare_cost;
+
/*
- a*N-b yields the gain of the filter
- for N key tuples of the index key_no
+    (gain*row_combinations)-cost_of_building_range_filter yields the gain of
+    the filter for 'row_combinations' key tuples of the index key_no;
+    'gain' is calculated with avg_access_and_eval_gain_per_row(container_type).
*/
- double a;
- /* The value of N where the gain is 0 */
+ double gain;
+ /* The value of row_combinations where the gain is 0 */
double cross_x;
/* Used for pruning of the potential range filters */
key_map abs_independent;
@@ -412,16 +415,14 @@ class Range_rowid_filter_cost_info : public Sql_alloc
These two parameters are used to choose the best range filter
in the function TABLE::best_range_rowid_filter_for_partial_join
*/
- double a_adj;
+ double gain_adj;
double cross_x_adj;
public:
- /* The type of the container of the range filter */
- Rowid_filter_container_type container_type;
- /* The index whose range scan would be used to build the range filter */
- uint key_no;
/* The selectivity of the range filter */
double selectivity;
+ /* The type of the container of the range filter */
+ Rowid_filter_container_type container_type;
Range_rowid_filter_cost_info() : table(0), key_no(0) {}
@@ -430,39 +431,44 @@ public:
double build_cost(Rowid_filter_container_type container_type);
- inline double lookup_cost(Rowid_filter_container_type cont_type);
+ double lookup_cost(Rowid_filter_container_type cont_type);
+ inline double lookup_cost() { return lookup_cost(container_type); }
inline double
- avg_access_and_eval_gain_per_row(Rowid_filter_container_type cont_type);
+ avg_access_and_eval_gain_per_row(Rowid_filter_container_type cont_type,
+ double cost_of_row_fetch);
inline double avg_adjusted_gain_per_row(double access_cost_factor);
inline void set_adjusted_gain_param(double access_cost_factor);
/* Get the gain that usage of filter promises for r key tuples */
- inline double get_gain(double r)
+ inline double get_gain(double row_combinations)
{
- return r * a - b;
+ return row_combinations * gain - cost_of_building_range_filter;
}
/* Get the adjusted gain that usage of filter promises for r key tuples */
- inline double get_adjusted_gain(double r)
+ inline double get_adjusted_gain(double row_combinations)
{
- return r * a_adj - b;
+ return row_combinations * gain_adj - cost_of_building_range_filter;
}
/*
The gain promised by usage of the filter for r key tuples
due to less condition evaluations
*/
- inline double get_cmp_gain(double r)
+ inline double get_cmp_gain(double row_combinations)
{
- return r * (1 - selectivity) / TIME_FOR_COMPARE;
+ return (row_combinations * (1 - selectivity) * where_cost);
}
Rowid_filter_container *create_container();
- double get_a() { return a; }
+ double get_setup_cost() { return cost_of_building_range_filter; }
+ double get_lookup_cost();
+ double get_gain() { return gain; }
+ uint get_key_no() { return key_no; }
void trace_info(THD *thd);
@@ -472,11 +478,20 @@ public:
friend
void TABLE::init_cost_info_for_usable_range_rowid_filters(THD *thd);
+  /* Best range rowid filter for a partial join */
friend
Range_rowid_filter_cost_info *
- TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
- double records,
- double access_cost_factor);
+ TABLE::best_range_rowid_filter(uint access_key_no,
+ double records,
+ double fetch_cost,
+ double index_only_cost,
+ double prev_records,
+ double *records_out);
+ Range_rowid_filter_cost_info *
+ apply_filter(THD *thd, TABLE *table, ALL_READ_COST *cost,
+ double *records_arg,
+ double *startup_cost,
+ uint ranges, double record_count);
};
#endif /* ROWID_FILTER_INCLUDED */
diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc
deleted file mode 100644
index 496e781d2eb..00000000000
--- a/sql/rpl_record_old.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-/* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#include "mariadb.h"
-#include "sql_priv.h"
-#include "rpl_rli.h"
-#include "rpl_record_old.h"
-#include "log_event.h" // Log_event_type
-
-size_t
-pack_row_old(TABLE *table, MY_BITMAP const* cols,
- uchar *row_data, const uchar *record)
-{
- Field **p_field= table->field, *field;
- int n_null_bytes= table->s->null_bytes;
- uchar *ptr;
- uint i;
- my_ptrdiff_t const rec_offset= record - table->record[0];
- my_ptrdiff_t const def_offset= table->s->default_values - table->record[0];
- memcpy(row_data, record, n_null_bytes);
- ptr= row_data+n_null_bytes;
-
- for (i= 0 ; (field= *p_field) ; i++, p_field++)
- {
- if (bitmap_is_set(cols,i))
- {
- my_ptrdiff_t const offset=
- field->is_null(rec_offset) ? def_offset : rec_offset;
- field->move_field_offset(offset);
- ptr= field->pack(ptr, field->ptr);
- field->move_field_offset(-offset);
- }
- }
- return (static_cast<size_t>(ptr - row_data));
-}
-
-
-/*
- Unpack a row into a record.
-
- SYNOPSIS
- unpack_row()
- rli Relay log info
- table Table to unpack into
- colcnt Number of columns to read from record
- record Record where the data should be unpacked
- row Packed row data
- cols Pointer to columns data to fill in
- row_end Pointer to variable that will hold the value of the
- one-after-end position for the row
- master_reclength
- Pointer to variable that will be set to the length of the
- record on the master side
- rw_set Pointer to bitmap that holds either the read_set or the
- write_set of the table
-
- DESCRIPTION
-
- The row is assumed to only consist of the fields for which the
- bitset represented by 'arr' and 'bits'; the other parts of the
- record are left alone.
-
- At most 'colcnt' columns are read: if the table is larger than
- that, the remaining fields are not filled in.
-
- RETURN VALUE
-
- Error code, or zero if no error. The following error codes can
- be returned:
-
- ER_NO_DEFAULT_FOR_FIELD
- Returned if one of the fields existing on the slave but not on
- the master does not have a default value (and isn't nullable)
- ER_SLAVE_CORRUPT_EVENT
- Wrong data for field found.
- */
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-int
-unpack_row_old(rpl_group_info *rgi,
- TABLE *table, uint const colcnt, uchar *record,
- uchar const *row, const uchar *row_buffer_end,
- MY_BITMAP const *cols,
- uchar const **row_end, ulong *master_reclength,
- MY_BITMAP* const rw_set, Log_event_type const event_type)
-{
- DBUG_ASSERT(record && row);
- my_ptrdiff_t const offset= record - (uchar*) table->record[0];
- size_t master_null_bytes= table->s->null_bytes;
-
- if (colcnt != table->s->fields)
- {
- Field **fptr= &table->field[colcnt-1];
- do
- master_null_bytes= (*fptr)->last_null_byte();
- while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF &&
- fptr-- > table->field);
-
- /*
- If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time,
- there were no nullable fields nor BIT fields at all in the
- columns that are common to the master and the slave. In that
- case, there is only one null byte holding the X bit.
-
- OBSERVE! There might still be nullable columns following the
- common columns, so table->s->null_bytes might be greater than 1.
- */
- if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF)
- master_null_bytes= 1;
- }
-
- DBUG_ASSERT(master_null_bytes <= table->s->null_bytes);
- memcpy(record, row, master_null_bytes); // [1]
- int error= 0;
-
- bitmap_set_all(rw_set);
-
- Field **const begin_ptr = table->field;
- Field **field_ptr;
- uchar const *ptr= row + master_null_bytes;
- Field **const end_ptr= begin_ptr + colcnt;
- for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr)
- {
- Field *const f= *field_ptr;
-
- if (bitmap_is_set(cols, (uint)(field_ptr - begin_ptr)))
- {
- f->move_field_offset(offset);
- ptr= f->unpack(f->ptr, ptr, row_buffer_end, 0);
- f->move_field_offset(-offset);
- if (!ptr)
- {
- rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, NULL,
- "Could not read field `%s` of table `%s`.`%s`",
- f->field_name.str, table->s->db.str,
- table->s->table_name.str);
- return(ER_SLAVE_CORRUPT_EVENT);
- }
- }
- else
- bitmap_clear_bit(rw_set, (uint)(field_ptr - begin_ptr));
- }
-
- *row_end = ptr;
- if (master_reclength)
- {
- if (*field_ptr)
- *master_reclength = (ulong)((*field_ptr)->ptr - table->record[0]);
- else
- *master_reclength = table->s->reclength;
- }
-
- /*
- Set properties for remaining columns, if there are any. We let the
- corresponding bit in the write_set be set, to write the value if
- it was not there already. We iterate over all remaining columns,
- even if there were an error, to get as many error messages as
- possible. We are still able to return a pointer to the next row,
- so redo that.
-
- This generation of error messages is only relevant when inserting
- new rows.
- */
- for ( ; *field_ptr ; ++field_ptr)
- {
- uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
-
- DBUG_PRINT("debug", ("flags = 0x%x, mask = 0x%x, flags & mask = 0x%x",
- (*field_ptr)->flags, mask,
- (*field_ptr)->flags & mask));
-
- if (event_type == WRITE_ROWS_EVENT &&
- ((*field_ptr)->flags & mask) == mask)
- {
- rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, NULL,
- "Field `%s` of table `%s`.`%s` "
- "has no default value and cannot be NULL",
- (*field_ptr)->field_name.str, table->s->db.str,
- table->s->table_name.str);
- error = ER_NO_DEFAULT_FOR_FIELD;
- }
- else
- (*field_ptr)->set_default();
- }
-
- return error;
-}
-#endif
diff --git a/sql/rpl_record_old.h b/sql/rpl_record_old.h
deleted file mode 100644
index 0b2dd432138..00000000000
--- a/sql/rpl_record_old.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2007, 2010, Oracle and/or its affiliates.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#ifndef RPL_RECORD_OLD_H
-#define RPL_RECORD_OLD_H
-
-#include "log_event.h" /* Log_event_type */
-
-#ifndef MYSQL_CLIENT
-size_t pack_row_old(TABLE *table, MY_BITMAP const* cols,
- uchar *row_data, const uchar *record);
-
-#ifdef HAVE_REPLICATION
-int unpack_row_old(rpl_group_info *rgi,
- TABLE *table, uint const colcnt, uchar *record,
- uchar const *row, uchar const *row_buffer_end,
- MY_BITMAP const *cols,
- uchar const **row_end, ulong *master_reclength,
- MY_BITMAP* const rw_set,
- Log_event_type const event_type);
-#endif
-#endif
-#endif
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index 83668449c2d..a8af950fa08 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -525,13 +525,7 @@ read_relay_log_description_event(IO_CACHE *cur_log, ulonglong start_pos,
Format_description_log_event *fdev;
bool found= false;
- /*
- By default the relay log is in binlog format 3 (4.0).
- Even if format is 4, this will work enough to read the first event
- (Format_desc) (remember that format 4 is just lenghtened compared to format
- 3; format 3 is a prefix of format 4).
- */
- fdev= new Format_description_log_event(3);
+ fdev= new Format_description_log_event(4);
while (!found)
{
@@ -666,14 +660,7 @@ int init_relay_log_pos(Relay_log_info* rli,const char* log,
running, say, CHANGE MASTER.
*/
delete rli->relay_log.description_event_for_exec;
- /*
- By default the relay log is in binlog format 3 (4.0).
- Even if format is 4, this will work enough to read the first event
- (Format_desc) (remember that format 4 is just lenghtened compared to format
- 3; format 3 is a prefix of format 4).
- */
- rli->relay_log.description_event_for_exec= new
- Format_description_log_event(3);
+ rli->relay_log.description_event_for_exec= new Format_description_log_event(4);
mysql_mutex_lock(log_lock);
diff --git a/sql/set_var.cc b/sql/set_var.cc
index aa9ec5ab5ca..b49040b8ec3 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -310,7 +310,7 @@ do { \
case SHOW_HA_ROWS: do_num_val (ha_rows,CMD);
#define case_for_double(CMD) \
- case SHOW_DOUBLE: do_num_val (double,CMD)
+ case SHOW_DOUBLE: do_num_val (double,CMD);
#define case_get_string_as_lex_string \
case SHOW_CHAR: \
@@ -1550,4 +1550,3 @@ ulonglong get_system_variable_hash_version(void)
{
return system_variable_hash_version;
}
-
diff --git a/sql/set_var.h b/sql/set_var.h
index ce1d01b9bd2..70f2cb8ead2 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -84,7 +84,7 @@ protected:
typedef bool (*on_update_function)(sys_var *self, THD *thd, enum_var_type type);
int flags; ///< or'ed flag_enum values
- const SHOW_TYPE show_val_type; ///< what value_ptr() returns for sql_show.cc
+ SHOW_TYPE show_val_type; ///< what value_ptr() returns for sql_show.cc
PolyLock *guard; ///< *second* lock that protects the variable
ptrdiff_t offset; ///< offset to the value from global_system_variables
on_check_function on_check;
@@ -134,6 +134,8 @@ public:
return system_charset_info;
}
bool is_readonly() const { return flags & READONLY; }
+ void update_flags(int new_flags) { flags = new_flags; }
+ int get_flags() const { return flags; }
/**
the following is only true for keycache variables,
that support the syntax @@keycache_name.variable_name
@@ -485,5 +487,4 @@ void free_engine_list(plugin_ref *list);
plugin_ref *copy_engine_list(plugin_ref *list);
plugin_ref *temp_copy_engine_list(THD *thd, plugin_ref *list);
char *pretty_print_engine_list(THD *thd, plugin_ref *list);
-
#endif
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 7c27fa38ef1..34a5dad6d3f 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -6746,11 +6746,8 @@ ER_VIEW_FRM_NO_USER
ger "View '%-.192s'.'%-.192s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu"
geo "ხედს '%-.192s'.'%-.192s' áƒáƒ¦áƒ›áƒ¬áƒ”რის ინფáƒáƒ áƒ›áƒáƒªáƒ˜áƒ áƒáƒ  გáƒáƒáƒ©áƒœáƒ˜áƒ (ცხრილის ფáƒáƒ áƒ›áƒáƒ¢áƒ˜ ძველიáƒ). áƒáƒ¦áƒ›áƒ¬áƒ”რáƒáƒ“ მიმდინáƒáƒ áƒ” მáƒáƒ›áƒ®áƒ›áƒáƒ áƒ”ბელი გáƒáƒ›áƒáƒ˜áƒ§áƒ”ნებáƒ. გთხáƒáƒ•áƒ—, შექმენით ეს ხედი თáƒáƒ•áƒ˜áƒ“áƒáƒœ!"
spa "La vista '%-.192s'.'%-.192s' no tiene información de definidor (formato viejo de tabla). Se usa el usuario actual como definidor. Por favor, ¡recrea la vista!"
-ER_VIEW_OTHER_USER
- chi "您需è¦ä½¿ç”¨'%-.192s'@'%-.192s'的创建视图的超级特æƒ"
- eng "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer"
- ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.192s'@'%-.192s' zu erzeugen"
- spa "Vd necesita el privilegio SUPER para la creación de la vista con definidor '%-.192s'@'%-.192s'"
+ER_UNUSED_30
+ eng "You should never see it"
ER_NO_SUCH_USER
chi "指定为定义的用户('%-.64s'@'%-.64s')ä¸å­˜åœ¨"
eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
diff --git a/sql/slave.cc b/sql/slave.cc
index 5c8db6f2c56..86180af8aed 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -160,7 +160,6 @@ failed read"
typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE;
static int process_io_rotate(Master_info* mi, Rotate_log_event* rev);
-static int process_io_create_file(Master_info* mi, Create_file_log_event* cev);
static bool wait_for_relay_log_space(Relay_log_info* rli);
static bool io_slave_killed(Master_info* mi);
static bool sql_slave_killed(rpl_group_info *rgi);
@@ -1487,20 +1486,6 @@ bool net_request_file(NET* net, const char* fname)
(uchar*) "", 0));
}
-/*
- From other comments and tests in code, it looks like
- sometimes Query_log_event and Load_log_event can have db == 0
- (see rewrite_db() above for example)
- (cases where this happens are unclear; it may be when the master is 3.23).
-*/
-
-const char *print_slave_db_safe(const char* db)
-{
- DBUG_ENTER("*print_slave_db_safe");
-
- DBUG_RETURN((db ? db : ""));
-}
-
#endif /* HAVE_REPLICATION */
bool Sql_cmd_show_slave_status::execute(THD *thd)
@@ -1785,6 +1770,8 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
case 0:
case 1:
case 2:
+ case 3:
+ case 4:
errmsg= err_buff2;
snprintf(err_buff2, sizeof(err_buff2),
"Master reported unrecognized MariaDB version: %s",
@@ -1792,14 +1779,6 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
err_code= ER_SLAVE_FATAL_ERROR;
sprintf(err_buff, ER_DEFAULT(err_code), err_buff2);
break;
- case 3:
- mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(1, mysql->server_version);
- break;
- case 4:
- mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(3, mysql->server_version);
- break;
default:
/*
Master is MySQL >=5.0. Give a default Format_desc event, so that we can
@@ -4877,28 +4856,25 @@ connected:
goto connected;
}
- if (mi->rli.relay_log.description_event_for_queue->binlog_version > 1)
+ /*
+ Register ourselves with the master.
+ */
+ THD_STAGE_INFO(thd, stage_registering_slave_on_master);
+ if (register_slave_on_master(mysql, mi, &suppress_warnings))
{
- /*
- Register ourselves with the master.
- */
- THD_STAGE_INFO(thd, stage_registering_slave_on_master);
- if (register_slave_on_master(mysql, mi, &suppress_warnings))
+ if (!check_io_slave_killed(mi, "Slave I/O thread killed "
+ "while registering slave on master"))
{
- if (!check_io_slave_killed(mi, "Slave I/O thread killed "
- "while registering slave on master"))
- {
- sql_print_error("Slave I/O thread couldn't register on master");
- if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
- reconnect_messages[SLAVE_RECON_ACT_REG]))
- goto err;
- }
- else
+ sql_print_error("Slave I/O thread couldn't register on master");
+ if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_REG]))
goto err;
- goto connected;
}
- DBUG_EXECUTE_IF("fail_com_register_slave", goto err;);
+ else
+ goto err;
+ goto connected;
}
+ DBUG_EXECUTE_IF("fail_com_register_slave", goto err;);
DBUG_PRINT("info",("Starting reading binary log from master"));
thd->set_command(COM_SLAVE_IO);
@@ -5874,115 +5850,6 @@ err_during_init:
/*
- process_io_create_file()
-*/
-
-static int process_io_create_file(Master_info* mi, Create_file_log_event* cev)
-{
- int error = 1;
- ulong num_bytes;
- bool cev_not_written;
- THD *thd = mi->io_thd;
- NET *net = &mi->mysql->net;
- DBUG_ENTER("process_io_create_file");
-
- if (unlikely(!cev->is_valid()))
- DBUG_RETURN(1);
-
- if (!mi->rpl_filter->db_ok(cev->db))
- {
- skip_load_data_infile(net);
- DBUG_RETURN(0);
- }
- DBUG_ASSERT(cev->inited_from_old);
- thd->file_id = cev->file_id = mi->file_id++;
- thd->variables.server_id = cev->server_id;
- cev_not_written = 1;
-
- if (unlikely(net_request_file(net,cev->fname)))
- {
- sql_print_error("Slave I/O: failed requesting download of '%s'",
- cev->fname);
- goto err;
- }
-
- /*
- This dummy block is so we could instantiate Append_block_log_event
- once and then modify it slightly instead of doing it multiple times
- in the loop
- */
- {
- Append_block_log_event aev(thd,0,0,0,0);
-
- for (;;)
- {
- if (unlikely((num_bytes=my_net_read(net)) == packet_error))
- {
- sql_print_error("Network read error downloading '%s' from master",
- cev->fname);
- goto err;
- }
- if (unlikely(!num_bytes)) /* eof */
- {
- /* 3.23 master wants it */
- net_write_command(net, 0, (uchar*) "", 0, (uchar*) "", 0);
- /*
- If we wrote Create_file_log_event, then we need to write
- Execute_load_log_event. If we did not write Create_file_log_event,
- then this is an empty file and we can just do as if the LOAD DATA
- INFILE had not existed, i.e. write nothing.
- */
- if (unlikely(cev_not_written))
- break;
- Execute_load_log_event xev(thd,0,0);
- xev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&xev)))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
- ER_THD(thd, ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
- "error writing Exec_load event to relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
- break;
- }
- if (unlikely(cev_not_written))
- {
- cev->block = net->read_pos;
- cev->block_len = num_bytes;
- if (unlikely(mi->rli.relay_log.append(cev)))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
- ER_THD(thd, ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
- "error writing Create_file event to relay log");
- goto err;
- }
- cev_not_written=0;
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
- }
- else
- {
- aev.block = net->read_pos;
- aev.block_len = num_bytes;
- aev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&aev)))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
- ER_THD(thd, ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
- "error writing Append_block event to relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ;
- }
- }
- }
- error=0;
-err:
- DBUG_RETURN(error);
-}
-
-
-/*
Start using a new binary log on the master
SYNOPSIS
@@ -6026,25 +5893,10 @@ static int process_io_rotate(Master_info *mi, Rotate_log_event *rev)
mi->events_till_disconnect++;
#endif
- /*
- If description_event_for_queue is format <4, there is conversion in the
- relay log to the slave's format (4). And Rotate can mean upgrade or
- nothing. If upgrade, it's to 5.0 or newer, so we will get a Format_desc, so
- no need to reset description_event_for_queue now. And if it's nothing (same
- master version as before), no need (still using the slave's format).
- */
+  /* This prevents a redundant FDLE in the relay log */
if (mi->rli.relay_log.description_event_for_queue->binlog_version >= 4)
- {
- DBUG_ASSERT(mi->rli.relay_log.description_event_for_queue->checksum_alg ==
- mi->rli.relay_log.relay_log_checksum_alg);
-
- delete mi->rli.relay_log.description_event_for_queue;
- /* start from format 3 (MySQL 4.0) again */
- mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(3);
- mi->rli.relay_log.description_event_for_queue->checksum_alg=
- mi->rli.relay_log.relay_log_checksum_alg;
- }
+ mi->rli.relay_log.description_event_for_queue->binlog_version= 3;
+
/*
Rotate the relay log makes binlog format detection easier (at next slave
start or mysqlbinlog)
@@ -6053,216 +5905,9 @@ static int process_io_rotate(Master_info *mi, Rotate_log_event *rev)
}
/*
- Reads a 3.23 event and converts it to the slave's format. This code was
- copied from MySQL 4.0.
-*/
-static int queue_binlog_ver_1_event(Master_info *mi, const uchar *buf,
- ulong event_len)
-{
- const char *errmsg = 0;
- ulong inc_pos;
- bool ignore_event= 0;
- uchar *tmp_buf = 0;
- Relay_log_info *rli= &mi->rli;
- DBUG_ENTER("queue_binlog_ver_1_event");
-
- /*
- If we get Load event, we need to pass a non-reusable buffer
- to read_log_event, so we do a trick
- */
- if ((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT)
- {
- if (unlikely(!(tmp_buf= (uchar*) my_malloc(key_memory_binlog_ver_1_event,
- event_len+1, MYF(MY_WME)))))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
- ER(ER_SLAVE_FATAL_ERROR), "Memory allocation failed");
- DBUG_RETURN(1);
- }
- memcpy(tmp_buf,buf,event_len);
- /*
- Create_file constructor wants a 0 as last char of buffer, this 0 will
- serve as the string-termination char for the file's name (which is at the
- end of the buffer)
- We must increment event_len, otherwise the event constructor will not see
- this end 0, which leads to segfault.
- */
- tmp_buf[event_len++]=0;
- int4store(tmp_buf+EVENT_LEN_OFFSET, event_len);
- buf= tmp_buf;
- }
- /*
- This will transform LOAD_EVENT into CREATE_FILE_EVENT, ask the master to
- send the loaded file, and write it to the relay log in the form of
- Append_block/Exec_load (the SQL thread needs the data, as that thread is not
- connected to the master).
- */
- Log_event *ev=
- Log_event::read_log_event(buf, event_len, &errmsg,
- mi->rli.relay_log.description_event_for_queue, 0);
- if (unlikely(!ev))
- {
- sql_print_error("Read invalid event from master: '%s',\
- master could be corrupt but a more likely cause of this is a bug",
- errmsg);
- my_free(tmp_buf);
- DBUG_RETURN(1);
- }
-
- mysql_mutex_lock(&mi->data_lock);
- ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */
- switch (ev->get_type_code()) {
- case STOP_EVENT:
- ignore_event= 1;
- inc_pos= event_len;
- break;
- case ROTATE_EVENT:
- if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- inc_pos= 0;
- break;
- case CREATE_FILE_EVENT:
- /*
- Yes it's possible to have CREATE_FILE_EVENT here, even if we're in
- queue_old_event() which is for 3.23 events which don't comprise
- CREATE_FILE_EVENT. This is because read_log_event() above has just
- transformed LOAD_EVENT into CREATE_FILE_EVENT.
- */
- {
- /* We come here when and only when tmp_buf != 0 */
- DBUG_ASSERT(tmp_buf != 0);
- inc_pos=event_len;
- ev->log_pos+= inc_pos;
- int error = process_io_create_file(mi,(Create_file_log_event*)ev);
- delete ev;
- mi->master_log_pos += inc_pos;
- DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- mysql_mutex_unlock(&mi->data_lock);
- my_free(tmp_buf);
- DBUG_RETURN(error);
- }
- default:
- inc_pos= event_len;
- break;
- }
- if (likely(!ignore_event))
- {
- if (ev->log_pos)
- /*
- Don't do it for fake Rotate events (see comment in
- Log_event::Log_event(const char* buf...) in log_event.cc).
- */
- ev->log_pos+= event_len; /* make log_pos be the pos of the end of the event */
- if (unlikely(rli->relay_log.append(ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- rli->relay_log.harvest_bytes_written(&rli->log_space_total);
- }
- delete ev;
- mi->master_log_pos+= inc_pos;
- DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(0);
-}
-
-/*
- Reads a 4.0 event and converts it to the slave's format. This code was copied
- from queue_binlog_ver_1_event(), with some affordable simplifications.
-*/
-static int queue_binlog_ver_3_event(Master_info *mi, const uchar *buf,
- ulong event_len)
-{
- const char *errmsg = 0;
- ulong inc_pos;
- char *tmp_buf = 0;
- Relay_log_info *rli= &mi->rli;
- DBUG_ENTER("queue_binlog_ver_3_event");
-
- /* read_log_event() will adjust log_pos to be end_log_pos */
- Log_event *ev=
- Log_event::read_log_event(buf, event_len, &errmsg,
- mi->rli.relay_log.description_event_for_queue, 0);
- if (unlikely(!ev))
- {
- sql_print_error("Read invalid event from master: '%s',\
- master could be corrupt but a more likely cause of this is a bug",
- errmsg);
- my_free(tmp_buf);
- DBUG_RETURN(1);
- }
- mysql_mutex_lock(&mi->data_lock);
- switch (ev->get_type_code()) {
- case STOP_EVENT:
- goto err;
- case ROTATE_EVENT:
- if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- inc_pos= 0;
- break;
- default:
- inc_pos= event_len;
- break;
- }
-
- if (unlikely(rli->relay_log.append(ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- rli->relay_log.harvest_bytes_written(&rli->log_space_total);
- delete ev;
- mi->master_log_pos+= inc_pos;
-err:
- DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(0);
-}
-
-/*
- queue_old_event()
-
- Writes a 3.23 or 4.0 event to the relay log, after converting it to the 5.0
- (exactly, slave's) format. To do the conversion, we create a 5.0 event from
- the 3.23/4.0 bytes, then write this event to the relay log.
-
- TODO:
- Test this code before release - it has to be tested on a separate
- setup with 3.23 master or 4.0 master
-*/
-
-static int queue_old_event(Master_info *mi, const uchar *buf, ulong event_len)
-{
- DBUG_ENTER("queue_old_event");
-
- switch (mi->rli.relay_log.description_event_for_queue->binlog_version) {
- case 1:
- DBUG_RETURN(queue_binlog_ver_1_event(mi,buf,event_len));
- case 3:
- DBUG_RETURN(queue_binlog_ver_3_event(mi,buf,event_len));
- default: /* unsupported format; eg version 2 */
- DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
- DBUG_RETURN(1);
- }
-}
-
-/*
queue_event()
- If the event is 3.23/4.0, passes it to queue_old_event() which will convert
- it. Otherwise, writes a 5.0 (or newer) event to the relay log. Then there is
+ Writes a 5.0 (or newer) event to the relay log. Then there is
no format conversion, it's pure read/write of bytes.
So a 5.0.0 slave's relay log can contain events in the slave's format or in
any >=5.0.0 format.
@@ -6350,10 +5995,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
}
DBUG_ASSERT(((uchar) buf[FLAGS_OFFSET] & LOG_EVENT_ACCEPT_OWN_F) == 0);
- if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 &&
- buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */)
- DBUG_RETURN(queue_old_event(mi,buf,event_len));
-
#ifdef ENABLED_DEBUG_SYNC
/*
A (+d,dbug.rows_events_to_delay_relay_logging)-test is supposed to
@@ -6617,7 +6258,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
*/
inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0;
DBUG_PRINT("info",("binlog format is now %d",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
+ mi->rli.relay_log.description_event_for_queue->binlog_version));
}
break;
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index a2f7be8a684..6f2fa9bf672 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1058,7 +1058,7 @@ class User_table_tabular: public User_table
access|= DELETE_HISTORY_ACL;
if (access & SUPER_ACL)
- access|= GLOBAL_SUPER_ADDED_SINCE_USER_TABLE_ACLS;
+ access|= ALLOWED_BY_SUPER_BEFORE_101100 | ALLOWED_BY_SUPER_BEFORE_110000;
/*
The SHOW SLAVE HOSTS statement :
@@ -1545,10 +1545,15 @@ class User_table_json: public User_table
{
privilege_t mask= ALL_KNOWN_ACL_100304;
ulonglong orig_access= access;
+ if (version_id < 110000)
+ {
+ if (access & SUPER_ACL)
+ access|= ALLOWED_BY_SUPER_BEFORE_110000;
+ }
if (version_id < 101100)
{
if (access & SUPER_ACL)
- access|= READ_ONLY_ADMIN_ACL;
+ access|= ALLOWED_BY_SUPER_BEFORE_101100;
}
if (version_id >= 100509)
{
@@ -1566,26 +1571,6 @@ class User_table_json: public User_table
else // 100501 or earlier
{
/*
- Address changes in SUPER and REPLICATION SLAVE made in 10.5.2.
- This also covers a special case: if the user had ALL PRIVILEGES before
- the upgrade, it gets ALL PRIVILEGES after the upgrade.
- */
- if (access & SUPER_ACL)
- {
- if (access & REPL_SLAVE_ACL)
- {
- /*
- The user could do both before the upgrade:
- - set global variables (because of SUPER_ACL)
- - execute "SHOW SLAVE HOSTS" (because of REPL_SLAVE_ACL)
- Grant all new privileges that were splitted from SUPER (in 10.5.2),
- and REPLICATION MASTER ADMIN, so it still can do "SHOW SLAVE HOSTS".
- */
- access|= REPL_MASTER_ADMIN_ACL;
- }
- access|= GLOBAL_SUPER_ADDED_SINCE_USER_TABLE_ACLS;
- }
- /*
REPLICATION_CLIENT(BINLOG_MONITOR_ACL) should allow SHOW SLAVE STATUS
REPLICATION SLAVE should allow SHOW RELAYLOG EVENTS
*/
@@ -2533,6 +2518,8 @@ bool acl_init(bool dont_read_acl_tables)
DBUG_RETURN(1); /* purecov: inspected */
thd->thread_stack= (char*) &thd;
thd->store_globals();
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:acl_init"),
+ default_charset_info);
/*
It is safe to call acl_reload() since acl_* arrays and hashes which
will be freed there are global static objects and thus are initialized
@@ -8001,6 +7988,9 @@ bool grant_init()
DBUG_RETURN(1); /* purecov: deadcode */
thd->thread_stack= (char*) &thd;
thd->store_globals();
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:grant_init"),
+ default_charset_info);
+
return_val= grant_reload(thd);
delete thd;
DBUG_RETURN(return_val);
@@ -13306,12 +13296,7 @@ static bool send_server_handshake_packet(MPVIO_EXT *mpvio,
data_len= SCRAMBLE_LENGTH;
}
- /* When server version is specified in config file, don't include
- the replication hack prefix. */
- if (using_custom_server_version)
- end= strnmov(end, server_version, SERVER_VERSION_LENGTH) + 1;
- else
- end= strxnmov(end, SERVER_VERSION_LENGTH, RPL_VERSION_HACK, server_version, NullS) + 1;
+ end= strnmov(end, server_version, SERVER_VERSION_LENGTH) + 1;
int4store((uchar*) end, mpvio->auth_info.thd->thread_id);
end+= 4;
diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h
index 4136dff1e0c..8f60d4b523a 100644
--- a/sql/sql_analyze_stmt.h
+++ b/sql/sql_analyze_stmt.h
@@ -414,7 +414,8 @@ public:
n_positive_checks++;
}
- inline void increment_container_elements_count() { container_elements++; }
+ inline void set_container_elements_count(uint elements)
+ { container_elements= elements; }
uint get_container_elements() const { return container_elements; }
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 6625571fb32..fa2b144e06f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -4757,6 +4757,7 @@ restart:
tbl->reginfo.lock_type= tables->lock_type;
tbl->reginfo.skip_locked= tables->skip_locked;
}
+
#ifdef WITH_WSREP
/*
At this point we have SE associated with table so we can check wsrep_mode
diff --git a/sql/sql_base.h b/sql/sql_base.h
index bd439166a0f..6e17d8214ad 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -357,7 +357,7 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr)
}
table->tablenr= tablenr;
table->map= (table_map) 1 << tablenr;
- table->force_index= table_list->force_index;
+ table->force_index= table->force_index_join= 0;
table->force_index_order= table->force_index_group= 0;
table->covering_keys= table->s->keys_for_keyread;
}
diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h
index 353601eb98a..05b201a5d6e 100644
--- a/sql/sql_bitmap.h
+++ b/sql/sql_bitmap.h
@@ -270,13 +270,21 @@ public:
{
return buffer[0];
}
- uint bits_set()
+ uint bits_set() const
{
uint res= 0;
for (size_t i= 0; i < ARRAY_ELEMENTS; i++)
- res += my_count_bits(buffer[i]);
+ if (buffer[i])
+ res+= my_count_bits(buffer[i]);
return res;
}
+ uint find_first_bit() const
+ {
+ for (size_t i= 0; i < ARRAY_ELEMENTS; i++)
+ if (buffer[i])
+ return (uint)i*BITS_PER_ELEMENT + my_find_first_bit(buffer[i]);
+ return width;
+ }
class Iterator
{
const Bitmap& map;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index e0c6c15a3e2..c6a929d6fea 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1235,7 +1235,6 @@ void THD::init()
avoid temporary tables replication failure.
*/
variables.pseudo_thread_id= thread_id;
-
variables.default_master_connection.str= default_master_connection_buff;
::strmake(default_master_connection_buff,
global_system_variables.default_master_connection.str,
diff --git a/sql/sql_class.h b/sql/sql_class.h
index ab1fc174452..79f347f40c5 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -90,7 +90,6 @@ struct rpl_group_info;
struct rpl_parallel_thread;
class Rpl_filter;
class Query_log_event;
-class Load_log_event;
class Log_event_writer;
class sp_rcontext;
class sp_cache;
@@ -685,7 +684,7 @@ typedef struct system_variables
char* dynamic_variables_ptr;
uint dynamic_variables_head; /* largest valid variable offset */
uint dynamic_variables_size; /* how many bytes are in use */
-
+
ulonglong max_heap_table_size;
ulonglong tmp_memory_table_size;
ulonglong tmp_disk_table_size;
@@ -693,7 +692,6 @@ typedef struct system_variables
ulonglong max_statement_time;
ulonglong optimizer_switch;
ulonglong optimizer_trace;
- ulong optimizer_trace_max_mem_size;
sql_mode_t sql_mode; ///< which non-standard SQL behaviour should be enabled
sql_mode_t old_behavior; ///< which old SQL behaviour should be enabled
ulonglong option_bits; ///< OPTION_xxx constants, e.g. OPTION_PROFILING
@@ -707,6 +705,7 @@ typedef struct system_variables
ulonglong sortbuff_size;
ulonglong default_regex_flags;
ulonglong max_mem_used;
+ ulonglong max_rowid_filter_size;
/**
Place holders to store Multi-source variables in sys_var.cc during
@@ -715,10 +714,14 @@ typedef struct system_variables
ulonglong slave_skip_counter;
ulonglong max_relay_log_size;
+ double optimizer_where_cost, optimizer_scan_setup_cost;
+ double long_query_time_double, max_statement_time_double;
+ double sample_percentage;
+
ha_rows select_limit;
ha_rows max_join_size;
ha_rows expensive_subquery_limit;
- ulong auto_increment_increment, auto_increment_offset;
+
#ifdef WITH_WSREP
/*
Stored values of the auto_increment_increment and auto_increment_offset
@@ -727,11 +730,12 @@ typedef struct system_variables
original values (which are set by the user) by calculated ones (which
are based on the cluster size):
*/
+ ulonglong wsrep_gtid_seq_no;
ulong saved_auto_increment_increment, saved_auto_increment_offset;
ulong saved_lock_wait_timeout;
- ulonglong wsrep_gtid_seq_no;
#endif /* WITH_WSREP */
- uint eq_range_index_dive_limit;
+
+ ulong auto_increment_increment, auto_increment_offset;
ulong column_compression_zlib_strategy;
ulong lock_wait_timeout;
ulong join_cache_level;
@@ -754,8 +758,8 @@ typedef struct system_variables
ulong optimizer_search_depth;
ulong optimizer_selectivity_sampling_limit;
ulong optimizer_use_condition_selectivity;
+ ulong optimizer_trace_max_mem_size;
ulong use_stat_tables;
- double sample_percentage;
ulong histogram_size;
ulong histogram_type;
ulong preload_buff_size;
@@ -785,8 +789,16 @@ typedef struct system_variables
ulong tx_isolation;
ulong updatable_views_with_limit;
ulong alter_algorithm;
- int max_user_connections;
ulong server_id;
+ ulong session_track_transaction_info;
+ ulong threadpool_priority;
+ ulong optimizer_max_sel_arg_weight;
+ ulong vers_alter_history;
+
+ /* deadlock detection */
+ ulong wt_timeout_short, wt_deadlock_search_depth_short;
+ ulong wt_timeout_long, wt_deadlock_search_depth_long;
+
/**
In slave thread we need to know in behalf of which
thread the query is being run to replicate temp tables properly
@@ -796,10 +808,18 @@ typedef struct system_variables
When replicating an event group with GTID, keep these values around so
slave binlog can receive the same GTID as the original.
*/
- uint32 gtid_domain_id;
uint64 gtid_seq_no;
+ uint32 gtid_domain_id;
uint group_concat_max_len;
+ uint eq_range_index_dive_limit;
+ uint idle_transaction_timeout;
+ uint idle_readonly_transaction_timeout;
+ uint idle_write_transaction_timeout;
+ uint column_compression_threshold;
+ uint column_compression_zlib_level;
+ uint in_subquery_conversion_threshold;
+ int max_user_connections;
/**
Default transaction access mode. READ ONLY (true) or READ WRITE (false).
@@ -819,7 +839,17 @@ typedef struct system_variables
my_bool binlog_annotate_row_events;
my_bool binlog_direct_non_trans_update;
my_bool column_compression_zlib_wrap;
-
+ my_bool sysdate_is_now;
+ my_bool wsrep_on;
+ my_bool wsrep_causal_reads;
+ my_bool wsrep_dirty_reads;
+ my_bool pseudo_slave_mode;
+ my_bool session_track_schema;
+ my_bool session_track_state_change;
+#ifdef USER_VAR_TRACKING
+ my_bool session_track_user_variables;
+#endif // USER_VAR_TRACKING
+ my_bool tcp_nodelay;
plugin_ref table_plugin;
plugin_ref tmp_table_plugin;
plugin_ref enforced_table_plugin;
@@ -845,47 +875,16 @@ typedef struct system_variables
MY_LOCALE *lc_time_names;
Time_zone *time_zone;
+ char *session_track_system_variables;
- my_bool sysdate_is_now;
-
- /* deadlock detection */
- ulong wt_timeout_short, wt_deadlock_search_depth_short;
- ulong wt_timeout_long, wt_deadlock_search_depth_long;
-
- my_bool wsrep_on;
- my_bool wsrep_causal_reads;
- uint wsrep_sync_wait;
- ulong wsrep_retry_autocommit;
+ /* Some wsrep variables */
ulonglong wsrep_trx_fragment_size;
+ ulong wsrep_retry_autocommit;
ulong wsrep_trx_fragment_unit;
ulong wsrep_OSU_method;
- my_bool wsrep_dirty_reads;
- double long_query_time_double, max_statement_time_double;
-
- my_bool pseudo_slave_mode;
-
- char *session_track_system_variables;
- ulong session_track_transaction_info;
- my_bool session_track_schema;
- my_bool session_track_state_change;
-#ifdef USER_VAR_TRACKING
- my_bool session_track_user_variables;
-#endif // USER_VAR_TRACKING
- my_bool tcp_nodelay;
-
- ulong threadpool_priority;
-
- uint idle_transaction_timeout;
- uint idle_readonly_transaction_timeout;
- uint idle_write_transaction_timeout;
- uint column_compression_threshold;
- uint column_compression_zlib_level;
- uint in_subquery_conversion_threshold;
- ulong optimizer_max_sel_arg_weight;
- ulonglong max_rowid_filter_size;
+ uint wsrep_sync_wait;
vers_asof_timestamp_t vers_asof_timestamp;
- ulong vers_alter_history;
my_bool binlog_alter_two_phase;
} SV;
@@ -976,19 +975,21 @@ typedef struct system_status_var
functions are used */
ulong feature_dynamic_columns; /* +1 when creating a dynamic column */
ulong feature_fulltext; /* +1 when MATCH is used */
- ulong feature_gis; /* +1 opening a table with GIS features */
- ulong feature_invisible_columns; /* +1 opening a table with invisible column */
- ulong feature_json; /* +1 when JSON function appears in the statement */
+ ulong feature_gis; /* +1 opening table with GIS features */
+ ulong feature_invisible_columns; /* +1 opening table with invisible column */
+ ulong feature_json; /* +1 when JSON function is used */
ulong feature_locale; /* +1 when LOCALE is set */
ulong feature_subquery; /* +1 when subqueries are used */
- ulong feature_system_versioning; /* +1 opening a table WITH SYSTEM VERSIONING */
+ ulong feature_system_versioning; /* +1 opening table WITH SYSTEM VERSIONING */
ulong feature_application_time_periods;
/* +1 opening a table with application-time period */
- ulong feature_insert_returning; /* +1 when INSERT...RETURNING is used */
+ ulong feature_insert_returning; /* +1 when INSERT...RETURNING is used */
ulong feature_timezone; /* +1 when XPATH is used */
ulong feature_trigger; /* +1 opening a table with triggers */
ulong feature_xml; /* +1 when XPATH is used */
ulong feature_window_functions; /* +1 when window functions are used */
+ ulong feature_into_outfile; /* +1 when INTO OUTFILE is used */
+ ulong feature_into_variable; /* +1 when INTO VARIABLE is used */
/* From MASTER_GTID_WAIT usage */
ulong master_gtid_wait_timeouts; /* Number of timeouts */
@@ -2693,6 +2694,7 @@ public:
struct system_status_var org_status_var; // For user statistics
struct system_status_var *initial_status_var; /* used by show status */
THR_LOCK_INFO lock_info; // Locking info of this thread
+
/**
Protects THD data accessed from other threads:
- thd->query and thd->query_length (used by SHOW ENGINE
@@ -3423,7 +3425,7 @@ public:
Check if the number of rows accessed by a statement exceeded
LIMIT ROWS EXAMINED. If so, signal the query engine to stop execution.
*/
- void check_limit_rows_examined()
+ inline void check_limit_rows_examined()
{
if (++accessed_rows_and_keys > lex->limit_rows_examined_cnt)
set_killed(ABORT_QUERY);
@@ -6270,6 +6272,7 @@ public:
Item **items_to_copy; /* Fields in tmp table */
TMP_ENGINE_COLUMNDEF *recinfo, *start_recinfo;
KEY *keyinfo;
+ ulong *rec_per_key;
ha_rows end_write_records;
/**
Number of normal fields in the query, including those referred to
@@ -6845,13 +6848,13 @@ public:
/*
Cost to materialize - execute the sub-join and write rows into temp.table
*/
- Cost_estimate materialization_cost;
+ double materialization_cost;
/* Cost to make one lookup in the temptable */
- Cost_estimate lookup_cost;
+ double lookup_cost;
/* Cost of scanning the materialized table */
- Cost_estimate scan_cost;
+ double scan_cost;
/* --- Execution structures ---------- */
@@ -7440,11 +7443,38 @@ inline void handler::increment_statistics(ulong SSV::*offset) const
table->in_use->check_limit_rows_examined();
}
+inline void handler::fast_increment_statistics(ulong SSV::*offset) const
+{
+ status_var_increment(table->in_use->status_var.*offset);
+}
+
inline void handler::decrement_statistics(ulong SSV::*offset) const
{
status_var_decrement(table->in_use->status_var.*offset);
}
+/* Update references in the handler to the table */
+
+inline void handler::set_table(TABLE* table_arg)
+{
+ table= table_arg;
+ costs= &table_arg->s->optimizer_costs;
+}
+
+inline bool handler::pk_is_clustering_key(uint index) const
+{
+ /*
+ We have to check for MAX_INDEX as table->s->primary_key can be
+ MAX_KEY in the case where there is no primary key.
+ */
+ return index != MAX_KEY && is_clustering_key(index);
+}
+
+inline bool handler::is_clustering_key(uint index) const
+{
+ DBUG_ASSERT(index != MAX_KEY);
+ return table->is_clustering_key(index);
+}
inline int handler::ha_ft_read(uchar *buf)
{
diff --git a/sql/sql_const.h b/sql/sql_const.h
index 490b870d768..1e5fef4af36 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -119,8 +119,13 @@
#define CREATE_MODE 0 /* Default mode on new files */
#define NAMES_SEP_CHAR 255 /* Char to sep. names */
-#define READ_RECORD_BUFFER (uint) (IO_SIZE*8) /* Pointer_buffer_size */
-#define DISK_BUFFER_SIZE (uint) (IO_SIZE*16) /* Size of diskbuffer */
+/*
+  This is used when reading large blocks with sequential reads.
+  We assume that reading this much costs roughly the same as one
+ seek / fetching one row from the storage engine.
+ Cost of one read of DISK_CHUNK_SIZE is DISK_SEEK_BASE_COST (ms).
+*/
+#define DISK_CHUNK_SIZE (uint) (65536) /* Size of diskbuffer for tmpfiles */
#define FRM_VER_TRUE_VARCHAR (FRM_VER+4) /* 10 */
#define FRM_VER_EXPRESSSIONS (FRM_VER+5) /* 11 */
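To make the intent of DISK_CHUNK_SIZE concrete, here is a rough standalone sketch of how a sequential tmpfile scan can be costed in 64K chunks; the per-chunk cost value below is an invented stand-in for the server's tunable disk read cost, not a real default:

  #include <cstdio>

  static const unsigned long long DISK_CHUNK_SIZE_SKETCH= 65536;  // 64K, as above
  static const double PER_CHUNK_READ_COST= 0.01;                  // stand-in cost (ms)

  /* Illustrative cost of sequentially scanning a tmpfile of 'bytes' bytes. */
  static double tmpfile_scan_cost(unsigned long long bytes)
  {
    double chunks= (double)((bytes + DISK_CHUNK_SIZE_SKETCH - 1) /
                            DISK_CHUNK_SIZE_SKETCH);
    return chunks * PER_CHUNK_READ_COST;
  }

  int main()
  {
    /* A 10 MiB tmpfile is 160 chunks -> 1.6 ms with the stand-in cost. */
    std::printf("%.3f ms\n", tmpfile_scan_cost(10ULL * 1024 * 1024));
    return 0;
  }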
@@ -199,63 +204,19 @@
#define MIN_ROWS_TO_USE_TABLE_CACHE 100
#define MIN_ROWS_TO_USE_BULK_INSERT 100
-/**
- The following is used to decide if MySQL should use table scanning
- instead of reading with keys. The number says how many evaluation of the
- WHERE clause is comparable to reading one extra row from a table.
-*/
-#define TIME_FOR_COMPARE 5.0 // 5 WHERE compares == one read
-#define TIME_FOR_COMPARE_IDX 20.0
-
-#define IDX_BLOCK_COPY_COST ((double) 1 / TIME_FOR_COMPARE)
-#define IDX_LOOKUP_COST ((double) 1 / 8)
-#define MULTI_RANGE_READ_SETUP_COST (IDX_BLOCK_COPY_COST/10)
-
-/**
- Number of comparisons of table rowids equivalent to reading one row from a
- table.
-*/
-#define TIME_FOR_COMPARE_ROWID (TIME_FOR_COMPARE*100)
-
-/* cost1 is better that cost2 only if cost1 + COST_EPS < cost2 */
-#define COST_EPS 0.001
-
/*
- For sequential disk seeks the cost formula is:
- DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * #blocks_to_skip
-
- The cost of average seek
- DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*BLOCKS_IN_AVG_SEEK =1.0.
+ The lower bound of accepted rows when using a filter.
+ This is used to ensure that filters are not too aggressive.
*/
-#define DISK_SEEK_BASE_COST ((double)0.9)
-
-#define BLOCKS_IN_AVG_SEEK 128
-
-#define DISK_SEEK_PROP_COST ((double)0.1/BLOCKS_IN_AVG_SEEK)
-
+#define MIN_ROWS_AFTER_FILTERING 1.0
/**
- Number of rows in a reference table when refereed through a not unique key.
+ Number of rows in a reference table when referred to through a non-unique key.
This value is only used when we don't know anything about the key
distribution.
*/
#define MATCHING_ROWS_IN_OTHER_TABLE 10
-/*
- Subquery materialization-related constants
-*/
-#define HEAP_TEMPTABLE_LOOKUP_COST 0.05
-#define DISK_TEMPTABLE_LOOKUP_COST 1.0
-#define SORT_INDEX_CMP_COST 0.02
-
-
-#define COST_MAX (DBL_MAX * (1.0 - DBL_EPSILON))
-
-#define COST_ADD(c,d) (COST_MAX - (d) > (c) ? (c) + (d) : COST_MAX)
-
-#define COST_MULT(c,f) (COST_MAX / (f) > (c) ? (c) * (f) : COST_MAX)
-
-
#define MY_CHARSET_BIN_MB_MAXLEN 1
/** Don't pack string keys shorter than this (if PACK_KEYS=1 isn't used). */
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d31d0b949ba..0c0f05aab07 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -526,7 +526,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
select=make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
if (unlikely(error))
DBUG_RETURN(TRUE);
- if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
+ if (unlikely((select && select->check_quick(thd, safe_update, limit)) ||
+ table->stat_records() == 0 ||
+ !limit))
{
query_plan.set_impossible_where();
if (thd->lex->describe || thd->lex->analyze_stmt)
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 6f0857239dd..22dd7734aea 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -674,7 +674,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
{
SELECT_LEX_UNIT *unit= derived->get_unit();
SELECT_LEX *first_select;
- bool res= FALSE, keep_row_order;
+ bool res= FALSE, keep_row_order, distinct;
DBUG_ENTER("mysql_derived_prepare");
DBUG_PRINT("enter", ("unit: %p table_list: %p alias: '%s'",
unit, derived, derived->alias.str));
@@ -870,18 +870,26 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
goto exit;
/*
- Temp table is created so that it hounours if UNION without ALL is to be
+ Temp table is created so that it honors if UNION without ALL is to be
processed
- As 'distinct' parameter we always pass FALSE (0), because underlying
- query will control distinct condition by itself. Correct test of
- distinct underlying query will be is_unit_op &&
- !unit->union_distinct->next_select() (i.e. it is union and last distinct
- SELECT is last SELECT of UNION).
+ We pass TRUE as the 'distinct' parameter in either of the following cases:
+
+ 1) It is a UNION and the last part of the union is distinct (thus the
+ final temporary table should not contain duplicates).
+ 2) It is not a UNION and the unit->distinct flag is set. This is the
+ case for WHERE A IN (...).
+
+ Note that the underlying query will also control the distinct condition.
*/
thd->create_tmp_table_for_derived= TRUE;
+ distinct= (unit->first_select()->next_select() ?
+ unit->union_distinct && !unit->union_distinct->next_select() :
+ unit->distinct);
+
if (!(derived->table) &&
- derived->derived_result->create_result_table(thd, &unit->types, FALSE,
+ derived->derived_result->create_result_table(thd, &unit->types,
+ distinct,
(first_select->options |
thd->variables.option_bits |
TMP_TABLE_ALL_COLUMNS),
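A standalone restatement of the new 'distinct' decision above may help; the field names mirror the patch and the SQL in the comments is only illustrative:

  /* Sketch only: mirrors the ternary expression used in mysql_derived_prepare(). */
  struct unit_sketch
  {
    bool is_union;              /* more than one SELECT in the unit             */
    bool last_select_distinct;  /* union_distinct is set and is the last SELECT */
    bool distinct_flag;         /* unit->distinct, e.g. for "a IN (SELECT ...)" */
  };

  static bool derived_tmp_table_distinct(const unit_sketch &u)
  {
    /*
      UNION:      SELECT a FROM t1 UNION SELECT a FROM t2            -> true
      non-UNION:  WHERE a IN (SELECT b FROM t2) with unit->distinct  -> true
    */
    return u.is_union ? u.last_select_distinct : u.distinct_flag;
  }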
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index 7c3e87d42e3..e98728eb443 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -1033,6 +1033,9 @@ void Explain_select::print_explain_json(Explain_query *query,
writer->add_member("select_id").add_ll(select_id);
add_linkage(writer);
+ if (cost != 0.0)
+ writer->add_member("cost").add_double(cost);
+
if (is_analyze && time_tracker.get_loops())
{
writer->add_member("r_loops").add_ll(time_tracker.get_loops());
@@ -1379,10 +1382,12 @@ double Explain_table_access::get_r_filtered()
}
-int Explain_table_access::print_explain(select_result_sink *output, uint8 explain_flags,
+int Explain_table_access::print_explain(select_result_sink *output,
+ uint8 explain_flags,
bool is_analyze,
uint select_id, const char *select_type,
- bool using_temporary, bool using_filesort)
+ bool using_temporary,
+ bool using_filesort)
{
THD *thd= output->thd; // note: for SHOW EXPLAIN, this is target thd.
MEM_ROOT *mem_root= thd->mem_root;
@@ -1911,10 +1916,21 @@ void Explain_table_access::print_explain_json(Explain_query *query,
rowid_filter->print_explain_json(query, writer, is_analyze);
}
+ if (loops != 0.0)
+ writer->add_member("loops").add_double(loops);
+
/* r_loops (not present in tabular output) */
if (is_analyze)
{
- writer->add_member("r_loops").add_ll(tracker.get_loops());
+ ha_rows loops= tracker.get_loops();
+ writer->add_member("r_loops").add_ll(loops);
+
+ if (type == JT_EQ_REF) // max one row
+ {
+ ha_rows table_loops= op_tracker.get_loops();
+ if (table_loops != loops)
+ writer->add_member("r_table_loops").add_ll(table_loops);
+ }
}
/* `rows` */
@@ -1940,7 +1956,13 @@ void Explain_table_access::print_explain_json(Explain_query *query,
else
writer->add_null();
}
+ }
+
+ if (cost != 0.0)
+ writer->add_member("cost").add_double(cost);
+ if (is_analyze)
+ {
if (op_tracker.get_loops())
{
double total_time= op_tracker.get_time_ms();
@@ -2011,6 +2033,9 @@ void Explain_table_access::print_explain_json(Explain_query *query,
writer->add_double(jbuf_tracker.get_filtered_after_where()*100.0);
else
writer->add_null();
+
+ writer->add_member("r_unpack_time_ms");
+ writer->add_double(jbuf_unpack_tracker.get_time_ms());
}
}
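For orientation, an abridged ANALYZE FORMAT=JSON fragment showing roughly where the new members from this file land might look like the following; the numbers are invented and unrelated members are elided:

  "query_block": {
    "select_id": 1,
    "cost": 0.042,              <- new: per-select cost
    ...
    "table": {
      "table_name": "t1",
      "loops": 10,              <- new: expected number of times the table is accessed
      "r_loops": 10,
      "r_table_loops": 8,       <- new: eq_ref only, shown when it differs from r_loops
      "cost": 0.038,            <- new: per-table cost
      "r_unpack_time_ms": 0.11, <- new: time spent unpacking rows from the join buffer
      ...
    }
  }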
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index cfff664f81e..03eb8821a34 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -216,6 +216,7 @@ public:
message(NULL),
having(NULL), having_value(Item::COND_UNDEF),
using_temporary(false), using_filesort(false),
+ cost(0.0),
time_tracker(is_analyze),
aggr_tree(NULL)
{}
@@ -249,9 +250,10 @@ public:
bool using_temporary;
bool using_filesort;
+ double cost;
/* ANALYZE members */
Time_and_counter_tracker time_tracker;
-
+
/*
Part of query plan describing sorting, temp.table usage, and duplicate
removal
@@ -753,9 +755,11 @@ public:
class Explain_table_access : public Sql_alloc
{
public:
- Explain_table_access(MEM_ROOT *root) :
+ Explain_table_access(MEM_ROOT *root, bool timed) :
derived_select_number(0),
non_merged_sjm_number(0),
+ cost(0.0),
+ loops(0.0),
extra_tags(root),
range_checked_fer(NULL),
full_scan_on_null_key(false),
@@ -766,6 +770,7 @@ public:
pushed_index_cond(NULL),
sjm_nest(NULL),
pre_join_sort(NULL),
+ jbuf_unpack_tracker(timed),
rowid_filter(NULL)
{}
~Explain_table_access() { delete sjm_nest; }
@@ -823,6 +828,10 @@ public:
ha_rows rows;
double filtered;
+ /* Total cost incurred during one execution of this select */
+ double cost;
+
+ double loops;
/*
Contents of the 'Extra' column. Some are converted into strings, some have
parameters, values for which are stored below.
@@ -874,6 +883,7 @@ public:
Gap_time_tracker extra_time_tracker;
Table_access_tracker jbuf_tracker;
+ Time_and_counter_tracker jbuf_unpack_tracker;
Explain_rowid_filter *rowid_filter;
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 7235dc6472d..bd7f04d2ce8 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -674,7 +674,7 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
if ((c_key->flags & HA_SPATIAL) ||
c_key->algorithm == HA_KEY_ALG_FULLTEXT ||
(ha_rkey_mode != HA_READ_KEY_EXACT &&
- (table->file->index_flags(handler->keyno, 0, TRUE) &
+ (table->key_info[handler->keyno].index_flags &
(HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE)) == 0))
{
my_error(ER_KEY_DOESNT_SUPPORT, MYF(0),
@@ -690,8 +690,7 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
}
if (key_expr->elements < keyinfo->user_defined_key_parts &&
- (table->file->index_flags(handler->keyno, 0, TRUE) &
- HA_ONLY_WHOLE_INDEX))
+ (table->key_info[handler->keyno].index_flags & HA_ONLY_WHOLE_INDEX))
{
my_error(ER_KEY_DOESNT_SUPPORT, MYF(0),
table->file->index_type(handler->keyno), keyinfo->name.str);
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index f9932f11798..51fdd58b9c4 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -664,16 +664,19 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond,
/* Assume that no indexes cover all required fields */
table->covering_keys.clear_all();
+ table->file->info(HA_STATUS_VARIABLE);
+ table->used_stat_records= table->file->stats.records;
SQL_SELECT *res= make_select(table, 0, 0, cond, 0, 0, error);
- if (unlikely(*error) ||
- (likely(res) && unlikely(res->check_quick(thd, 0, HA_POS_ERROR))) ||
- (likely(res) && res->quick && unlikely(res->quick->reset())))
- {
- delete res;
- res=0;
- }
- return res;
+ if (unlikely(!res) || unlikely(*error))
+ goto error;
+ (void) res->check_quick(thd, 0, HA_POS_ERROR);
+ if (!res->quick || res->quick->reset() == 0)
+ return res;
+
+error:
+ delete res;
+ return 0;
}
/*
@@ -1076,7 +1079,9 @@ error:
new_trans.restore_old_transaction();
error2:
- DBUG_RETURN(TRUE);
+ if (!thd->is_error())
+ my_eof(thd);
+ DBUG_RETURN(thd->is_error());
}
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index c7b6a0bf6e4..3c19674ff96 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -910,7 +910,12 @@ int JOIN_CACHE::alloc_buffer()
min_buff_size= get_min_join_buffer_size();
buff_size= get_max_join_buffer_size(optimize_buff_size);
- for (tab= start_tab; tab!= join_tab;
+ /*
+ Compute the total buffer usage for all join buffers up to
+ and including the current one.
+ */
+ for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab != join_tab;
tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
{
cache= tab->cache;
@@ -1080,7 +1085,7 @@ int JOIN_CACHE::init(bool for_explain)
/*
- Check the possibility to read the access keys directly from the join buffer
+ Check the possibility to read the access keys directly from the join buffer
SYNOPSIS
check_emb_key_usage()
@@ -1600,6 +1605,7 @@ bool JOIN_CACHE::put_record()
bool JOIN_CACHE::get_record()
{
bool res;
+ ANALYZE_START_TRACKING(thd(), join_tab->jbuf_unpack_tracker);
uchar *prev_rec_ptr= 0;
if (with_length)
pos+= size_of_rec_len;
@@ -1615,6 +1621,7 @@ bool JOIN_CACHE::get_record()
if (prev_cache)
prev_cache->get_record_by_pos(prev_rec_ptr);
}
+ ANALYZE_STOP_TRACKING(thd(), join_tab->jbuf_unpack_tracker);
return res;
}
@@ -2144,12 +2151,12 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
if (!join_tab->first_unmatched)
{
- bool pfs_batch_update= join_tab->pfs_batch_update(join);
- if (pfs_batch_update)
+ DBUG_ASSERT(join_tab->cached_pfs_batch_update == join_tab->pfs_batch_update());
+ if (join_tab->cached_pfs_batch_update)
join_tab->table->file->start_psi_batch_mode();
/* Find all records from join_tab that match records from join buffer */
rc= join_matching_records(skip_last);
- if (pfs_batch_update)
+ if (join_tab->cached_pfs_batch_update)
join_tab->table->file->end_psi_batch_mode();
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
goto finish;
@@ -2319,7 +2326,8 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
if ((rc= join_tab_execution_startup(join_tab)) < 0)
goto finish2;
- join_tab->build_range_rowid_filter_if_needed();
+ if (join_tab->need_to_build_rowid_filter)
+ join_tab->build_range_rowid_filter();
/* Prepare to retrieve all records of the joined table */
if (unlikely((error= join_tab_scan->open())))
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index e98055c7b35..ab16a9d0b8b 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2871,7 +2871,7 @@ void st_select_lex_node::init_query_common()
{
options= 0;
set_linkage(UNSPECIFIED_TYPE);
- distinct= TRUE;
+ distinct= FALSE;
no_table_names_allowed= 0;
uncacheable= 0;
}
@@ -5954,7 +5954,7 @@ unit_common_op st_select_lex_unit::common_op()
else
{
if (operation != op)
- operation= OP_MIX;
+ return OP_MIX;
}
}
}
@@ -5964,12 +5964,13 @@ unit_common_op st_select_lex_unit::common_op()
Save explain structures of a UNION. The only variable member is whether the
union has "Using filesort".
- There is also save_union_explain_part2() function, which is called before we read
- UNION's output.
+ There is also save_union_explain_part2() function, which is called before we
+ read UNION's output.
The reason for it is examples like this:
- SELECT col1 FROM t1 UNION SELECT col2 FROM t2 ORDER BY (select ... from t3 ...)
+ SELECT col1 FROM t1 UNION SELECT col2 FROM t2
+ ORDER BY (select ... from t3 ...)
Here, the (select ... from t3 ...) subquery must be a child of UNION's
st_select_lex. However, it is not connected as child until a very late
@@ -10143,6 +10144,7 @@ SELECT_LEX_UNIT *LEX::parsed_select_expr_start(SELECT_LEX *s1, SELECT_LEX *s2,
if (res == NULL)
return NULL;
res->pre_last_parse= sel1;
+ res->distinct= distinct;
push_select(res->fake_select_lex);
return res;
}
@@ -10189,7 +10191,7 @@ SELECT_LEX_UNIT *LEX::parsed_select_expr_cont(SELECT_LEX_UNIT *unit,
/**
Add primary expression as the next term in a given query expression body
- pruducing a new query expression body
+ producing a new query expression body
*/
SELECT_LEX_UNIT *
@@ -10389,7 +10391,6 @@ bool LEX::parsed_TVC_start()
insert_list= 0;
if (!(sel= alloc_select(TRUE)) || push_select(sel))
return true;
- sel->init_select();
sel->braces= FALSE; // just initialisation
return false;
}
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index fe574db528f..73924bb9d1c 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -836,8 +836,6 @@ err:
#ifndef EMBEDDED_LIBRARY
-
-/* Not a very useful function; just to avoid duplication of code */
static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex,
const char* db_arg, /* table's database */
const char* table_name_arg,
@@ -848,27 +846,34 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex,
int errcode)
{
char *load_data_query;
- my_off_t fname_start,
- fname_end;
- List<Item> fv;
+ my_off_t fname_start, fname_end;
Item *item, *val;
int n;
- const char *tdb= (thd->db.str != NULL ? thd->db.str : db_arg);
- const char *qualify_db= NULL;
- char command_buffer[1024];
- String query_str(command_buffer, sizeof(command_buffer),
- system_charset_info);
+ StringBuffer<1024> query_str(system_charset_info);
- Load_log_event lle(thd, ex, tdb, table_name_arg, fv, is_concurrent,
- duplicates, ignore, transactional_table);
+ query_str.append(STRING_WITH_LEN("LOAD DATA "));
+
+ if (is_concurrent)
+ query_str.append(STRING_WITH_LEN("CONCURRENT "));
+
+ fname_start= query_str.length();
- /*
- force in a LOCAL if there was one in the original.
- */
if (thd->lex->local_file)
- lle.set_fname_outside_temp_buf(ex->file_name, strlen(ex->file_name));
+ query_str.append(STRING_WITH_LEN("LOCAL "));
+ query_str.append(STRING_WITH_LEN("INFILE '"));
+ query_str.append_for_single_quote(ex->file_name, strlen(ex->file_name));
+ query_str.append(STRING_WITH_LEN("' "));
- query_str.length(0);
+ if (duplicates == DUP_REPLACE)
+ query_str.append(STRING_WITH_LEN("REPLACE "));
+ else if (ignore)
+ query_str.append(STRING_WITH_LEN("IGNORE "));
+
+ query_str.append(STRING_WITH_LEN("INTO"));
+
+ fname_end= query_str.length();
+
+ query_str.append(STRING_WITH_LEN(" TABLE "));
if (!thd->db.str || strcmp(db_arg, thd->db.str))
{
/*
@@ -876,10 +881,47 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex,
prefix table name with database name so that it
becomes a FQ name.
*/
- qualify_db= db_arg;
+ append_identifier(thd, &query_str, db_arg, strlen(db_arg));
+ query_str.append(STRING_WITH_LEN("."));
+ }
+ append_identifier(thd, &query_str, table_name_arg, strlen(table_name_arg));
+
+ if (ex->cs)
+ {
+ query_str.append(STRING_WITH_LEN(" CHARACTER SET "));
+ query_str.append(ex->cs->cs_name);
+ }
+
+ /* We have to create all optional fields as the default is not empty */
+ query_str.append(STRING_WITH_LEN(" FIELDS TERMINATED BY '"));
+ query_str.append_for_single_quote(ex->field_term);
+ query_str.append(STRING_WITH_LEN("'"));
+ if (ex->opt_enclosed)
+ query_str.append(STRING_WITH_LEN(" OPTIONALLY"));
+ query_str.append(STRING_WITH_LEN(" ENCLOSED BY '"));
+ query_str.append_for_single_quote(ex->enclosed);
+ query_str.append(STRING_WITH_LEN("'"));
+
+ query_str.append(STRING_WITH_LEN(" ESCAPED BY '"));
+ query_str.append_for_single_quote(ex->escaped);
+ query_str.append(STRING_WITH_LEN("'"));
+
+ query_str.append(STRING_WITH_LEN(" LINES TERMINATED BY '"));
+ query_str.append_for_single_quote(ex->line_term);
+ query_str.append(STRING_WITH_LEN("'"));
+ if (ex->line_start->length())
+ {
+ query_str.append(STRING_WITH_LEN(" STARTING BY '"));
+ query_str.append_for_single_quote(ex->line_start);
+ query_str.append(STRING_WITH_LEN("'"));
+ }
+
+ if (ex->skip_lines)
+ {
+ query_str.append(STRING_WITH_LEN(" IGNORE "));
+ query_str.append_ulonglong(ex->skip_lines);
+ query_str.append(STRING_WITH_LEN(" LINES "));
}
- lle.print_query(thd, FALSE, (const char*) ex->cs ? ex->cs->cs_name.str : NULL,
- &query_str, &fname_start, &fname_end, qualify_db);
/*
prepare fields-list and SET if needed; print_query won't do that for us.
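To illustrate what the rewritten logging code produces, a reconstructed statement for a hypothetical LOAD DATA LOCAL ... IGNORE call (file name, table and skip count invented, the FIELDS/LINES values being typical defaults) would look roughly like:

  LOAD DATA LOCAL INFILE '/tmp/data.txt' IGNORE INTO TABLE `db1`.`t1`
  FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\'
  LINES TERMINATED BY '\n' IGNORE 1 LINES

The fields-list and SET clauses are appended afterwards by the code that follows this hunk.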
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index b5d93e6fd99..7a171174a72 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -7591,15 +7591,12 @@ void THD::reset_for_next_command(bool do_clear_error)
save_prep_leaf_list= false;
-#ifdef WITH_WSREP
-#if !defined(DBUG_OFF)
+#if defined(WITH_WSREP) && !defined(DBUG_OFF)
if (mysql_bin_log.is_open())
-#endif
-#endif
- DBUG_PRINT("debug",
+ DBUG_PRINT("info",
("is_current_stmt_binlog_format_row(): %d",
is_current_stmt_binlog_format_row()));
-
+#endif
DBUG_VOID_RETURN;
}
@@ -8216,8 +8213,6 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->mdl_type= mdl_type;
ptr->table_options= table_options;
ptr->updating= MY_TEST(table_options & TL_OPTION_UPDATING);
- /* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */
- ptr->force_index= MY_TEST(table_options & TL_OPTION_FORCE_INDEX);
ptr->ignore_leaves= MY_TEST(table_options & TL_OPTION_IGNORE_LEAVES);
ptr->sequence= MY_TEST(table_options & TL_OPTION_SEQUENCE);
ptr->derived= table->sel;
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 5a077a934ac..094ff52a4ea 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1830,6 +1830,7 @@ static bool register_builtin(struct st_maria_plugin *plugin,
/*
called only by plugin_init()
*/
+
static void plugin_load(MEM_ROOT *tmp_root)
{
TABLE_LIST tables;
@@ -1847,6 +1848,8 @@ static void plugin_load(MEM_ROOT *tmp_root)
new_thd->thread_stack= (char*) &tables;
new_thd->store_globals();
+ new_thd->set_query_inner((char*) STRING_WITH_LEN("intern:plugin_load"),
+ default_charset_info);
new_thd->db= MYSQL_SCHEMA_NAME;
bzero((char*) &new_thd->net, sizeof(new_thd->net));
tables.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_PLUGIN_NAME, 0, TL_READ);
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 8f0f15a982a..8104806af71 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -83,6 +83,8 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
{
thd->thread_stack= (char*) &tmp_thd;
thd->store_globals();
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:reload_acl"),
+ default_charset_info);
}
if (likely(thd))
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index d338dc302e6..80a746f32b7 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1467,7 +1467,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
if (unlikely((file= open_binlog(&cache, name, &errormsg)) == (File)-1))
return errormsg;
- if (!(fdev= new Format_description_log_event(3)))
+ if (!(fdev= new Format_description_log_event(4)))
{
errormsg= "Out of memory initializing format_description event "
"while scanning binlog to find start position";
@@ -2273,7 +2273,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log,
if (info->fdev != NULL)
delete info->fdev;
- if (!(info->fdev= new Format_description_log_event(3)))
+ if (!(info->fdev= new Format_description_log_event(4)))
{
info->errmsg= "Out of memory initializing format_description event";
info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG;
@@ -4163,7 +4163,7 @@ bool mysql_show_binlog_events(THD* thd)
}
Format_description_log_event *description_event= new
- Format_description_log_event(3); /* MySQL 4.0 by default */
+ Format_description_log_event(4);
if (binary_log->is_open())
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 51bef49277e..26987c9072e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -47,6 +47,7 @@
// print_sjm, print_plan, TEST_join
#include "records.h" // init_read_record, end_read_record
#include "filesort.h" // filesort_free_buffers
+#include "filesort_utils.h" // get_qsort_sort_cost
#include "sql_union.h" // mysql_union
#include "opt_subselect.h"
#include "sql_derived.h"
@@ -69,6 +70,7 @@
#include "opt_trace.h"
#include "derived_handler.h"
#include "create_tmp_table.h"
+#include "optimizer_defaults.h"
/*
A key part number that means we're using a fulltext scan.
@@ -83,6 +85,30 @@
*/
#define FT_KEYPART (MAX_FIELDS+10)
+/*
+ We assume that when we do a hash join, only 10% of the rows in the hash
+ will match the current row.
+*/
+#define HASH_FANOUT 0.1
+
+/*
+ The following is used to check that A <= B, but with some margin as the
+ calculation is done slightly differently (mathematically correct, but
+ double calculations are not exact).
+ This is only used when comparing read rows and output rows, which
+ means that we can assume that both values are >= 0 and B cannot be notably
+ smaller than A.
+*/
+
+#define crash_if_first_double_is_bigger(A,B) DBUG_ASSERT(((A) == 0.0 && (B) == 0.0) || (A)/(B) < 1.0000001)
+
+#define double_to_rows(A) ((A) >= ((double)HA_ROWS_MAX) ? HA_ROWS_MAX : (ha_rows) (A))
+
+inline double safe_filtered(double a, double b)
+{
+ return b != 0 ? a/b*100.0 : 0.0;
+}
+
const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref",
"MAYBE_REF","ALL","range","index","fulltext",
"ref_or_null","unique_subquery","index_subquery",
@@ -232,7 +258,6 @@ static COND *make_cond_for_table_from_pred(THD *thd, Item *root_cond,
bool is_top_and_level);
static Item* part_of_refkey(TABLE *form,Field *field);
-uint find_shortest_key(TABLE *table, const key_map *usable_keys);
static bool test_if_cheaper_ordering(const JOIN_TAB *tab,
ORDER *order, TABLE *table,
key_map usable_keys, int key,
@@ -305,8 +330,9 @@ static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab,
static bool find_order_in_list(THD *, Ref_ptr_array, TABLE_LIST *, ORDER *,
List<Item> &, List<Item> &, bool, bool, bool);
-static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
- table_map rem_tables);
+static double table_after_join_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
+ table_map rem_tables,
+ double *records_out);
void set_postjoin_aggr_write_func(JOIN_TAB *tab);
static Item **get_sargable_cond(JOIN *join, TABLE *table);
@@ -323,6 +349,9 @@ static void fix_items_after_optimize(THD *thd, SELECT_LEX *select_lex);
static void optimize_rownum(THD *thd, SELECT_LEX_UNIT *unit, Item *cond);
static bool process_direct_rownum_comparison(THD *thd, SELECT_LEX_UNIT *unit,
Item *cond);
+static double prev_record_reads(const POSITION *positions, uint idx,
+ table_map found_ref, double record_count,
+ double *same_keys);
#ifndef DBUG_OFF
@@ -353,8 +382,8 @@ void dbug_serve_apcs(THD *thd, int n_calls)
Intended usage:
DBUG_EXECUTE_IF("show_explain_probe_2",
- if (dbug_user_var_equals_int(thd, "select_id", select_id))
- dbug_serve_apcs(thd, 1);
+ if (dbug_user_var_equals_int(thd, "select_id", select_id))
+ dbug_serve_apcs(thd, 1);
);
*/
@@ -408,9 +437,10 @@ bool dbug_user_var_equals_str(THD *thd, const char *name, const char* value)
POSITION::POSITION()
{
table= 0;
- records_read= cond_selectivity= read_time= 0.0;
+ records_read= cond_selectivity= read_time= records_out= records_init= 0.0;
prefix_record_count= 0.0;
key= 0;
+ forced_index= 0;
use_join_buffer= 0;
sj_strategy= SJ_OPT_NONE;
n_sj_tables= 0;
@@ -507,7 +537,6 @@ void JOIN::init(THD *thd_arg, List<Item> &fields_arg,
no_const_tables= FALSE;
first_select= sub_select;
- set_group_rpa= false;
group_sent= 0;
outer_ref_cond= pseudo_bits_cond= NULL;
@@ -532,9 +561,10 @@ static void trace_table_dependencies(THD *thd,
{
TABLE_LIST *table_ref= join_tabs[i].tab_list;
Json_writer_object trace_one_table(thd);
- trace_one_table.add_table_name(&join_tabs[i]);
- trace_one_table.add("row_may_be_null",
- (bool)table_ref->table->maybe_null);
+ trace_one_table.
+ add_table_name(&join_tabs[i]).
+ add("row_may_be_null",
+ (bool)table_ref->table->maybe_null);
const table_map map= table_ref->get_map();
DBUG_ASSERT(map < (1ULL << table_count));
for (uint j= 0; j < table_count; j++)
@@ -721,7 +751,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
else
{
for (sum_func= ref->in_sum_func; sum_func &&
- sum_func->aggr_level >= select->nest_level;
+ sum_func->aggr_level >= select->nest_level;
sum_func= sum_func->in_sum_func)
{
if (sum_func->aggr_level == select->nest_level)
@@ -1736,7 +1766,7 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
}
}
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
{
Json_writer_object trace_wrapper(thd);
opt_trace_print_expanded_query(thd, select_lex, &trace_wrapper);
@@ -1876,6 +1906,13 @@ int JOIN::optimize()
res= build_explain();
optimization_state= JOIN::OPTIMIZATION_DONE;
}
+
+ /*
+ Store the cost of this query into the 'last_query_cost' status variable
+ TODO: calculate a correct cost for a query with subqueries and UNIONs.
+ */
+ if (select_lex->select_number == 1)
+ thd->status_var.last_query_cost= best_read;
return res;
}
@@ -1917,9 +1954,9 @@ bool JOIN::make_range_rowid_filters()
continue;
DBUG_ASSERT(!(tab->ref.key >= 0 &&
- tab->ref.key == (int) tab->range_rowid_filter_info->key_no));
+ tab->ref.key == (int) tab->range_rowid_filter_info->get_key_no()));
DBUG_ASSERT(!(tab->ref.key == -1 && tab->quick &&
- tab->quick->index == tab->range_rowid_filter_info->key_no));
+ tab->quick->index == tab->range_rowid_filter_info->get_key_no()));
int err;
SQL_SELECT *sel= NULL;
@@ -1932,14 +1969,11 @@ bool JOIN::make_range_rowid_filters()
key_map filter_map;
filter_map.clear_all();
- filter_map.set_bit(tab->range_rowid_filter_info->key_no);
+ filter_map.set_bit(tab->range_rowid_filter_info->get_key_no());
filter_map.merge(tab->table->with_impossible_ranges);
- bool force_index_save= tab->table->force_index;
- tab->table->force_index= true;
int rc= sel->test_quick_select(thd, filter_map, (table_map) 0,
(ha_rows) HA_POS_ERROR,
- true, false, true, true);
- tab->table->force_index= force_index_save;
+ true /* force index */, false, true, true);
if (thd->is_error())
goto no_filter;
/*
@@ -1962,7 +1996,10 @@ bool JOIN::make_range_rowid_filters()
tab->range_rowid_filter_info,
filter_container, sel);
if (tab->rowid_filter)
+ {
+ tab->need_to_build_rowid_filter= true;
continue;
+ }
}
no_filter:
if (sel->quick)
@@ -1990,24 +2027,23 @@ bool JOIN::make_range_rowid_filters()
bool
JOIN::init_range_rowid_filters()
{
- DBUG_ENTER("init_range_rowid_filters");
-
JOIN_TAB *tab;
+ DBUG_ENTER("init_range_rowid_filters");
for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
+ tab->need_to_build_rowid_filter= false; // Safety
if (!tab->rowid_filter)
continue;
if (tab->rowid_filter->get_container()->alloc())
{
- delete tab->rowid_filter;
- tab->rowid_filter= 0;
+ tab->clear_range_rowid_filter();
continue;
}
tab->table->file->rowid_filter_push(tab->rowid_filter);
- tab->is_rowid_filter_built= false;
+ tab->need_to_build_rowid_filter= true;
}
DBUG_RETURN(0);
}
@@ -2029,6 +2065,7 @@ JOIN::optimize_inner()
{
DBUG_ENTER("JOIN::optimize_inner");
subq_exit_fl= false;
+ best_read= 0.0;
DEBUG_SYNC(thd, "before_join_optimize");
THD_STAGE_INFO(thd, stage_optimizing);
@@ -2248,7 +2285,8 @@ JOIN::optimize_inner()
(see build_equal_items() below) because it can be not rebuilt
at second invocation.
*/
- if (!thd->stmt_arena->is_conventional() && thd->mem_root != thd->stmt_arena->mem_root)
+ if (!thd->stmt_arena->is_conventional() &&
+ thd->mem_root != thd->stmt_arena->mem_root)
for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
if (tbl->table && tbl->on_expr && tbl->table->versioned())
{
@@ -2587,8 +2625,12 @@ int JOIN::optimize_stage2()
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
- if (optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_WITH_KEYS))
- drop_unused_derived_keys();
+ /*
+ We have to call drop_unused_derived_keys() even if we don't have any
+ generated keys (those enabled with OPTIMIZER_SWITCH_DERIVED_WITH_KEYS),
+ as we may still have unique constraints that have to be removed.
+ */
+ drop_unused_derived_keys();
if (rollup.state != ROLLUP::STATE_NONE)
{
@@ -2621,7 +2663,7 @@ int JOIN::optimize_stage2()
goto setup_subq_exit;
}
if (!(thd->variables.option_bits & OPTION_BIG_SELECTS) &&
- best_read > (double) thd->variables.max_join_size &&
+ join_record_count > (double) thd->variables.max_join_size &&
!(select_options & SELECT_DESCRIBE))
{ /* purecov: inspected */
my_message(ER_TOO_BIG_SELECT, ER_THD(thd, ER_TOO_BIG_SELECT), MYF(0));
@@ -3188,6 +3230,8 @@ int JOIN::optimize_stage2()
*/
if ((order || group_list) &&
tab->type != JT_ALL &&
+ tab->type != JT_RANGE &&
+ tab->type != JT_NEXT &&
tab->type != JT_FT &&
tab->type != JT_REF_OR_NULL &&
((order && simple_order) || (group_list && simple_group)))
@@ -3571,7 +3615,7 @@ bool JOIN::make_aggr_tables_info()
TABLE* table= create_tmp_table(thd, curr_tab->tmp_table_param,
all_fields,
NULL, distinct,
- TRUE, select_options, HA_POS_ERROR,
+ TRUE, select_options, HA_ROWS_MAX,
&empty_clex_str, !need_tmp,
keep_row_order);
if (!table)
@@ -4060,7 +4104,14 @@ bool JOIN::make_aggr_tables_info()
*/
if (unit->lim.is_with_ties())
{
- if (alloc_order_fields(this, order, with_ties_order_count))
+ /*
+ When ORDER BY is eliminated, we make use of the GROUP BY list.
+ We've already counted how many elements from ORDER BY
+ must be evaluated as part of WITH TIES so we use that.
+ */
+ ORDER *order_src = order ? order : group_list;
+ if (alloc_order_fields(this, order_src,
+ with_ties_order_count))
DBUG_RETURN(true);
}
@@ -4227,14 +4278,13 @@ bool
JOIN::add_sorting_to_table(JOIN_TAB *tab, ORDER *order)
{
tab->filesort=
- new (thd->mem_root) Filesort(order, HA_POS_ERROR, tab->keep_current_rowid,
+ new (thd->mem_root) Filesort(order, HA_ROWS_MAX, tab->keep_current_rowid,
tab->select);
if (!tab->filesort)
return true;
TABLE *table= tab->table;
if ((tab == join_tab + const_tables) &&
- table->pos_in_table_list &&
table->pos_in_table_list->is_sjm_scan_table())
{
tab->filesort->set_all_read_bits= TRUE;
@@ -4409,6 +4459,15 @@ bool JOIN::shrink_join_buffers(JOIN_TAB *jt,
}
buff_size= cache->get_join_buffer_size();
curr_space-= buff_size;
+ if (needed_space < buff_size)
+ {
+ /*
+ Safety: fail if we've exhausted available buffer space with
+ reduced join buffers.
+ */
+ DBUG_ASSERT(0);
+ return TRUE;
+ }
needed_space-= buff_size;
}
}
@@ -4450,7 +4509,6 @@ JOIN::reinit()
if (current_ref_ptrs != items0)
{
set_items_ref_array(items0);
- set_group_rpa= false;
}
/* need to reset ref access state (see join_read_key) */
@@ -4601,8 +4659,9 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
}
-void JOIN::exec()
+int JOIN::exec()
{
+ int res;
DBUG_EXECUTE_IF("show_explain_probe_join_exec_start",
if (dbug_user_var_equals_int(thd,
"show_explain_probe_select_id",
@@ -4610,7 +4669,7 @@ void JOIN::exec()
dbug_serve_apcs(thd, 1);
);
ANALYZE_START_TRACKING(thd, &explain->time_tracker);
- exec_inner();
+ res= exec_inner();
ANALYZE_STOP_TRACKING(thd, &explain->time_tracker);
DBUG_EXECUTE_IF("show_explain_probe_join_exec_end",
@@ -4619,10 +4678,11 @@ void JOIN::exec()
select_lex->select_number))
dbug_serve_apcs(thd, 1);
);
+ return res;
}
-void JOIN::exec_inner()
+int JOIN::exec_inner()
{
List<Item> *columns_list= &fields_list;
DBUG_ENTER("JOIN::exec_inner");
@@ -4658,12 +4718,12 @@ void JOIN::exec_inner()
{
thd->set_examined_row_count(0);
thd->limit_found_rows= 0;
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
columns_list= &procedure_fields_list;
}
if (result->prepare2(this))
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
if (!tables_list && (table_count || !select_lex->with_sum_func) &&
!select_lex->have_window_funcs())
@@ -4677,7 +4737,7 @@ void JOIN::exec_inner()
Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF))
{
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
/*
@@ -4715,7 +4775,7 @@ void JOIN::exec_inner()
/* Single select (without union) always returns 0 or 1 row */
thd->limit_found_rows= send_records;
thd->set_examined_row_count(0);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
/*
@@ -4735,7 +4795,7 @@ void JOIN::exec_inner()
if (unlikely(thd->is_error()))
{
error= thd->is_error();
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
if (zero_result_cause)
@@ -4759,7 +4819,7 @@ void JOIN::exec_inner()
select_options,
zero_result_cause,
having ? having : tmp_having, all_fields);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
}
@@ -4783,14 +4843,14 @@ void JOIN::exec_inner()
if (unlikely(thd->is_error()))
{
error= thd->is_error();
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
}
}
if ((this->select_lex->options & OPTION_SCHEMA_TABLE) &&
get_schema_tables_result(this, PROCESSED_BY_JOIN_EXEC))
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
if (select_options & SELECT_DESCRIBE)
{
@@ -4798,13 +4858,13 @@ void JOIN::exec_inner()
order != 0 && !skip_sort_order,
select_distinct,
!table_count ? "No tables used" : NullS);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
else if (select_lex->pushdown_select)
{
/* Execute the query pushed into a foreign engine */
error= select_lex->pushdown_select->execute();
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
else
{
@@ -4823,7 +4883,7 @@ void JOIN::exec_inner()
if (unlikely(thd->is_error()))
{
error= thd->is_error();
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
THD_STAGE_INFO(thd, stage_sending_data);
@@ -4838,7 +4898,7 @@ void JOIN::exec_inner()
DBUG_PRINT("counts", ("thd->examined_row_count: %lu",
(ulong) thd->get_examined_row_count()));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
@@ -5011,7 +5071,7 @@ mysql_select(THD *thd, TABLE_LIST *tables, List<Item> &fields, COND *conds,
SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
int err= 0;
- bool free_join= 1;
+ bool free_join= 1, exec_error= 0;
DBUG_ENTER("mysql_select");
if (!fields.is_empty())
@@ -5090,7 +5150,7 @@ mysql_select(THD *thd, TABLE_LIST *tables, List<Item> &fields, COND *conds,
if (unlikely(thd->is_error()))
goto err;
- join->exec();
+ exec_error= join->exec();
if (thd->lex->describe & DESCRIBE_EXTENDED)
{
@@ -5110,9 +5170,9 @@ err:
{
THD_STAGE_INFO(thd, stage_end);
err|= (int)(select_lex->cleanup());
- DBUG_RETURN(err || thd->is_error());
+ DBUG_RETURN(exec_error || err || thd->is_error());
}
- DBUG_RETURN(join->error ? join->error: err);
+ DBUG_RETURN(exec_error || err);
}
@@ -5265,7 +5325,6 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
DYNAMIC_ARRAY *keyuse_array)
{
int error= 0;
- TABLE *UNINIT_VAR(table); /* inited in all loops */
uint i,table_count,const_count,key;
uint sort_space;
table_map found_const_table_map, all_table_map;
@@ -5326,11 +5385,13 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
for (s= stat, i= 0; (tables= ti++); s++, i++)
{
TABLE_LIST *embedding= tables->embedding;
+ TABLE *table= tables->table;
stat_vector[i]=s;
- table_vector[i]=s->table=table=tables->table;
+ table_vector[i]= s->table= table;
s->tab_list= tables;
table->pos_in_table_list= tables;
error= tables->fetch_number_of_rows();
+ /* Calculate table->used_stat_records */
set_statistics_for_table(join->thd, table);
bitmap_clear_all(&table->cond_set);
@@ -5358,7 +5419,14 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->dependent= tables->dep_tables;
if (tables->schema_table)
- table->file->stats.records= table->used_stat_records= 2;
+ {
+ /*
+ Information schema is slow and we don't know how many rows we will
+ find. By setting a moderate amount of rows we are more likely
+ to have it materialized if needed.
+ */
+ table->file->stats.records= table->used_stat_records= 100;
+ }
table->opt_range_condition_rows= table->stat_records();
s->on_expr_ref= &tables->on_expr;
@@ -5426,6 +5494,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
{
set_position(join,const_count++,s,(KEYUSE*) 0);
no_rows_const_tables |= table->map;
+ table->file->stats.records= 0;
}
}
@@ -5459,7 +5528,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
for (s= stat ; s < stat_end ; s++)
{
- table= s->table;
+ TABLE *table= s->table;
for (JOIN_TAB *t= stat ; t < stat_end ; t++)
{
if (t->dependent & table->map)
@@ -5493,7 +5562,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
}
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
trace_table_dependencies(thd, stat, join->table_count);
if (join->conds || outer_join)
@@ -5514,7 +5583,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
skip_unprefixed_keyparts))
goto error;
DBUG_EXECUTE("opt", print_keyuse_array(keyuse_array););
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
print_keyuse_array_for_trace(thd, keyuse_array);
}
@@ -5563,7 +5632,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
for (JOIN_TAB **pos=stat_vector+const_count ; (s= *pos) ; pos++)
{
- table=s->table;
+ TABLE *table= s->table;
if (table->is_filled_at_execution())
continue;
@@ -5616,10 +5685,11 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
(*s->on_expr_ref)->is_expensive()))
{ // system table
int tmp= 0;
- s->type=JT_SYSTEM;
+ s->type= JT_SYSTEM;
join->const_table_map|=table->map;
set_position(join,const_count++,s,(KEYUSE*) 0);
- if ((tmp= join_read_const_table(join->thd, s, join->positions+const_count-1)))
+ if ((tmp= join_read_const_table(join->thd, s,
+ join->positions+const_count-1)))
{
if (tmp > 0)
goto error; // Fatal error
@@ -5683,7 +5753,14 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
base_const_ref.intersect(base_part);
base_eq_part= eq_part;
base_eq_part.intersect(base_part);
- if (table->actual_key_flags(keyinfo) & HA_NOSAME)
+
+ /*
+ We can read the const record if we are using a full unique key and
+ if the table is not an unopened, to-be-materialized table/view.
+ */
+ if ((table->actual_key_flags(keyinfo) & HA_NOSAME) &&
+ (!s->table->pos_in_table_list->is_materialized_derived() ||
+ s->table->pos_in_table_list->fill_me))
{
if (base_const_ref == base_eq_part &&
@@ -5811,52 +5888,43 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->startup_cost= 0;
if (s->type == JT_SYSTEM || s->type == JT_CONST)
{
-
Json_writer_object table_records(thd);
- /* Only one matching row */
- s->found_records= s->records= 1;
+ ha_rows records= 1;
+ if (s->type == JT_SYSTEM || s->table->file->stats.records == 0)
+ records= s->table->file->stats.records;
+ /* zero or one matching row */
+ s->records= s->found_records= records;
+ s->records_init= s->records_out= rows2double(records);
s->read_time=1.0;
- s->worst_seeks=1.0;
- table_records.add_table_name(s)
- .add("rows", s->found_records)
- .add("cost", s->read_time)
- .add("table_type", s->type == JT_CONST ?
- "const" :
- "system");
+ table_records.add_table_name(s).
+ add("rows", s->found_records).
+ add("cost", s->read_time).
+ add("table_type", s->type == JT_CONST ?
+ "const" : "system");
continue;
}
- /* Approximate found rows and time to read them */
- if (s->table->is_filled_at_execution())
- {
- get_delayed_table_estimates(s->table, &s->records, &s->read_time,
- &s->startup_cost);
- s->found_records= s->records;
- table->opt_range_condition_rows=s->records;
- }
- else
- s->scan_time();
+ /*
+ Approximate the number of found rows and the time to read them.
+ Update found_records, records, read_time and other scan-related
+ variables.
+ */
+ s->estimate_scan_time();
if (s->table->is_splittable())
s->add_keyuses_for_splitting();
/*
- Set a max range of how many seeks we can expect when using keys
- This is can't be to high as otherwise we are likely to use
- table scan.
- */
- s->worst_seeks= MY_MIN((double) s->found_records / 10,
- (double) s->read_time*3);
- if (s->worst_seeks < 2.0) // Fix for small tables
- s->worst_seeks=2.0;
-
- /*
Add to stat->const_keys those indexes for which all group fields or
all select distinct fields participate in one index.
*/
add_group_and_distinct_keys(join, s);
- s->table->cond_selectivity= 1.0;
-
+ /* This will be updated in calculate_cond_selectivity_for_table() */
+ s->table->set_cond_selectivity(1.0);
+ DBUG_ASSERT(s->table->used_stat_records == 0 ||
+ s->table->cond_selectivity <=
+ s->table->opt_range_condition_rows /
+ s->table->used_stat_records);
/*
Perform range analysis if there are keys it could use (1).
Don't do range analysis for materialized subqueries (2).
@@ -5869,7 +5937,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->table->pos_in_table_list->is_materialized_derived())) // (3)
{
bool impossible_range= FALSE;
- ha_rows records= HA_POS_ERROR;
+ ha_rows records= HA_ROWS_MAX;
SQL_SELECT *select= 0;
Item **sargable_cond= NULL;
if (!s->const_keys.is_clear_all())
@@ -5936,6 +6004,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
else
{
+ double records= 1;
join->const_table_map|= s->table->map;
set_position(join,const_count++,s,(KEYUSE*) 0);
s->type= JT_CONST;
@@ -5946,7 +6015,10 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->info= ET_IMPOSSIBLE_ON_CONDITION;
found_const_table_map|= s->table->map;
mark_as_null_row(s->table); // All fields are NULL
+ records= 0;
}
+ s->records_init= s->records_out= records;
+ s->found_records= s->records= (ha_rows)records;
}
}
if (records != HA_POS_ERROR)
@@ -5958,13 +6030,13 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
delete select;
else
{
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
add_table_scan_values_to_trace(thd, s);
}
}
else
{
- if (thd->trace_started())
+ if (unlikely(thd->trace_started()))
add_table_scan_values_to_trace(thd, s);
}
}
@@ -6000,13 +6072,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
DBUG_RETURN(TRUE); /* purecov: inspected */
{
- double records= 1;
SELECT_LEX_UNIT *unit= join->select_lex->master_unit();
/* Find an optimal join order of the non-constant tables. */
if (join->const_tables != join->table_count)
{
- if (choose_plan(join, all_table_map & ~join->const_table_map))
+ if (choose_plan(join, all_table_map & ~join->const_table_map, 0))
goto error;
#ifdef HAVE_valgrind
@@ -6021,7 +6092,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
memcpy((uchar*) join->best_positions,(uchar*) join->positions,
sizeof(POSITION)*join->const_tables);
join->join_record_count= 1.0;
- join->best_read=1.0;
+ /* Const tables are part of optimizer setup and not counted in cost */
+ join->best_read=0.0;
}
if (!(join->select_options & SELECT_DESCRIBE) &&
@@ -6031,10 +6103,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
Calculate estimated number of rows for materialized derived
table/view.
*/
+ double records= 1.0;
+ ha_rows rows;
for (i= 0; i < join->table_count ; i++)
if (double rr= join->best_positions[i].records_read)
records= COST_MULT(records, rr);
- ha_rows rows= records > (double) HA_ROWS_MAX ? HA_ROWS_MAX : (ha_rows) records;
+ rows= double_to_rows(records);
set_if_smaller(rows, unit->lim.get_select_limit());
join->select_lex->increase_derived_records(rows);
}
@@ -6382,8 +6456,8 @@ add_key_field(JOIN *join,
Field op formula
Field IS NULL
Field IS NOT NULL
- Field BETWEEN ...
- Field IN ...
+ Field BETWEEN ...
+ Field IN ...
*/
if (field->flags & PART_KEY_FLAG)
{
@@ -6471,10 +6545,10 @@ add_key_field(JOIN *join,
@param field_item Field item used for comparison
@param eq_func True if we used =, <=> or IS NULL
@param value Value used for comparison with field_item
- @param num_values Number of values[] that we are comparing against
+ @param num_values Number of values[] that we are comparing against
@param usable_tables Tables which can be used for key optimization
@param sargables IN/OUT Array of found sargable candidates
- @param row_col_no if = n that > 0 then field is compared only
+ @param row_col_no If = n, where n > 0, then the field is compared only
against the n-th component of row values
@note
@@ -7346,6 +7420,10 @@ static void remember_if_eq_ref_key(JOIN *join, KEYUSE *use)
Special treatment for ft-keys.
Update join->eq_ref_tables with a bitmap of all tables that can possible
have a EQ_REF key.
+
+ Note that the keys are generated to be used by best_access_path() during
+ the optimization stage. Unused keys will later be deleted by
+ JOIN::drop_unused_derived_keys().
*/
bool sort_and_filter_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse,
@@ -7395,7 +7473,9 @@ bool sort_and_filter_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse,
}
else
{
- /* Key changed, check if previous key was a primary/unique key lookup */
+ /*
+ Key changed, check if previous key was a primary/unique key lookup
+ */
if (prev != &key_end && !found_unprefixed_key_part)
remember_if_eq_ref_key(join, prev);
found_unprefixed_key_part= 0;
@@ -7676,7 +7756,9 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
{
join->positions[idx].table= table;
join->positions[idx].key=key;
- join->positions[idx].records_read=1.0; /* This is a const table */
+ join->positions[idx].records_read=1.0; /* This is a const table */
+ join->positions[idx].records_out=1.0; /* This is a const table */
+ join->positions[idx].records_init=1.0; /* This is a const table */
join->positions[idx].cond_selectivity= 1.0;
join->positions[idx].ref_depend_map= 0;
@@ -7701,24 +7783,30 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
/*
Estimate how many records we will get if we read just this table and apply
- a part of WHERE that can be checked for it.
+ a part of WHERE that can be checked using only the current table and
+ const tables.
- @detail
+ @param s Current JOIN_TAB
+ @param use_cond_selectivity Value of optimizer_use_condition_selectivity.
+ If > 1 then use table->cond_selectivity.
+ @return 0.0 No matching rows
+ @return >= 1.0 Number of expected matching rows
+
+ @details
Estimate how many records we will get if we
- read the given table with its "independent" access method (either quick
select or full table/index scan),
- - apply the part of WHERE that refers only to this table.
+ - apply the part of WHERE that refers only to this table and const tables.
+ - The result cannot be bigger than table records
- @seealso
- table_cond_selectivity() produces selectivity of condition that is checked
- after joining rows from this table to rows from preceding tables.
+ @see also
+ table_after_join_selectivity() produces selectivity of condition that is
+ checked after joining rows from this table to rows from preceding tables.
*/
-inline
-double matching_candidates_in_table(JOIN_TAB *s, bool with_found_constraint,
- uint use_cond_selectivity)
+static double apply_selectivity_for_table(JOIN_TAB *s,
+ uint use_cond_selectivity)
{
- ha_rows records;
double dbl_records;
if (use_cond_selectivity > 1)
@@ -7726,88 +7814,246 @@ double matching_candidates_in_table(JOIN_TAB *s, bool with_found_constraint,
TABLE *table= s->table;
double sel= table->cond_selectivity;
double table_records= rows2double(s->records);
+ DBUG_ASSERT(sel >= 0 && sel <= 1.0);
+ /*
+ table->cond_selectivity will include data from opt_range.
+ Here we check that this is indeed the case.
+ Note that if table_records == 0, then 'sel' is probably 1
+ */
+ DBUG_ASSERT(table_records == 0 ||
+ sel <= s->table->opt_range_condition_rows /
+ table_records);
dbl_records= table_records * sel;
- return dbl_records;
+ }
+ else
+ {
+ /*
+ This only takes into consideration constant key parts used with
+ this table!
+ If no such conditions existed the following should hold:
+ s->table->opt_range_condition_rows == s->found_records ==
+ s->records.
+ */
+ DBUG_ASSERT(s->table->opt_range_condition_rows <= s->found_records);
+ dbl_records= rows2double(s->table->opt_range_condition_rows);
}
- records = s->found_records;
-
+ DBUG_ASSERT(dbl_records <= s->records);
/*
- If there is a filtering condition on the table (i.e. ref analyzer found
- at least one "table.keyXpartY= exprZ", where exprZ refers only to tables
- preceding this table in the join order we're now considering), then
- assume that 25% of the rows will be filtered out by this condition.
-
- This heuristic is supposed to force tables used in exprZ to be before
- this table in join order.
+ Ensure we return at least one row if there is any possibility of a
+ matching row. Keeping the estimate >= 1.0 helps ensure that when we
+ calculate the total number of rows in a join, the result does not
+ become smaller after adding a table. In other words, we assume there
+ is at least one matching row when joining a row with the next table.
+ 0.0 is returned only if it is guaranteed there are no matching rows
+ (for example if the table is empty).
*/
- if (with_found_constraint)
- records-= records/4;
+ return dbl_records ? MY_MAX(dbl_records, MIN_ROWS_AFTER_FILTERING) : 0.0;
+}
- /*
- If applicable, get a more accurate estimate. Don't use the two
- heuristics at once.
- */
- if (s->table->opt_range_condition_rows != s->found_records)
- records= s->table->opt_range_condition_rows;
- dbl_records= (double)records;
- return dbl_records;
+/*
+ Take into account that the table's WHERE clause has conditions on earlier
+ tables that can reduce the number of accepted rows.
+
+ @param records Number of original rows (after selectivity)
+
+ If there is a filtering condition on the table (i.e. ref analyzer found
+ at least one "table.keyXpartY= exprZ", where exprZ refers only to tables
+ preceding this table in the join order we're now considering), then
+ assume that 25% of the rows will be filtered out by this condition.
+
+ This heuristic is supposed to force tables used in exprZ to be before
+ this table in join order.
+*/
+
+static double use_found_constraint(double records)
+{
+ records-= records/4;
+ return records ? MY_MAX(records, MIN_ROWS_AFTER_FILTERING) : 0.0;
}
/*
Calculate the cost of reading a set of rows through an index
+ @param eq_ref True if there is only one matching key (EQ_REF)
+
Logically this is identical to the code in multi_range_read_info_const()
except that the function also takes into account io_blocks and multiple
ranges.
One main difference between the functions is that
multi_range_read_info_const() adds a very small cost per range
- (IDX_LOOKUP_COST) and also MULTI_RANGE_READ_SETUP_COST, to ensure that
- 'ref' is preferred slightly over ranges.
+ (MULTI_RANGE_READ_SETUP_COST), to ensure that 'ref' is preferred
+ over ranges.
+
+ Note that this function assumes that index_only_cost is only to be
+ used with filtering (as cost.read_cost takes into account both
+ clustering and covered keys). index_only_cost does not include
+ KEY_COPY_COST, as for filtering there is no copying of rejected
+ keys.
+
+ If eq_ref is not set, it means that we have to do one extra 'read_next'
+ on the index to verify that there are no more keys with the same value.
+
+ WHERE_COST is not added to any result.
*/
-double cost_for_index_read(const THD *thd, const TABLE *table, uint key,
- ha_rows records, ha_rows worst_seeks)
+static ALL_READ_COST cost_for_index_read(const THD *thd, const TABLE *table,
+ uint key, ha_rows records,
+ bool eq_ref)
{
- DBUG_ENTER("cost_for_index_read");
- double cost;
+ ALL_READ_COST cost;
handler *file= table->file;
+ ha_rows max_seeks;
+ ha_rows extra_reads= eq_ref ? 0 : 1;
+ DBUG_ENTER("cost_for_index_read");
+
+ max_seeks= (ha_rows) thd->variables.max_seeks_for_key;
+ set_if_bigger(records, 1);
- set_if_smaller(records, (ha_rows) thd->variables.max_seeks_for_key);
if (file->is_clustering_key(key))
- cost= file->read_time(key, 1, records);
- else
- if (table->covering_keys.is_set(key))
- cost= file->keyread_time(key, 1, records);
+ {
+ cost.index_cost=
+ file->ha_keyread_clustered_time(key, 1, records+extra_reads, 0);
+ cost.copy_cost= rows2double(records) * file->ROW_COPY_COST;
+ /* There is no 'index_only_read' with a clustered index */
+ cost.row_cost= {0,0};
+ /* Capping of index_blocks will happen in handler::cost() */
+ cost.max_index_blocks= MY_MIN(file->row_blocks(), max_seeks);
+ cost.max_row_blocks= 0;
+ }
+ else if (table->covering_keys.is_set(key) && !table->no_keyread)
+ {
+ cost.index_cost= file->ha_keyread_time(key, 1, records + extra_reads, 0);
+ cost.row_cost= {0,0};
+ cost.copy_cost= rows2double(records) * file->KEY_COPY_COST;
+ cost.max_index_blocks= MY_MIN(file->index_blocks(key), max_seeks);
+ cost.max_row_blocks= 0;
+ }
else
- cost= ((file->keyread_time(key, 0, records) +
- file->read_time(key, 1, MY_MIN(records, worst_seeks))));
-
- DBUG_PRINT("statistics", ("cost: %.3f", cost));
+ {
+ cost.index_cost= file->ha_keyread_time(key, 1, records + extra_reads, 0);
+ /* ha_rnd_pos_time() includes time for copying the row */
+ cost.row_cost= file->ha_rnd_pos_time(records);
+ cost.max_index_blocks= MY_MIN(file->index_blocks(key), max_seeks);
+ cost.max_row_blocks= MY_MIN(file->row_blocks(), max_seeks);
+ cost.copy_cost= 0;
+ }
+ DBUG_PRINT("statistics", ("index_cost: %.3f row_cost: %.3f",
+ file->cost(cost.index_cost),
+ file->cost(cost.row_cost)));
DBUG_RETURN(cost);
}
-/*
- Adjust cost from table->quick_costs calculated by
- multi_range_read_info_const() to be comparable with cost_for_index_read()
-
- This functions is needed because best_access_path() doesn't add
- TIME_FOR_COMPARE to it's costs until very late.
- Preferably we should fix so that all costs are comparably.
- (All compared costs should include TIME_FOR_COMPARE for all found
- rows).
+/**
+ Apply filter if the filter is better than the current cost
+
+ @param thd Thread handler
+ @param table Table
+ @param cost Pointer to the current cost, which does not
+ include WHERE_COST. Will be updated to the
+ new cost if the filter is used.
+ @param records_arg Pointer to number of records for the current key.
+ Will be updated to records after filter, if filter is
+ used.
+ @param startup_cost Startup cost. Will be updated if filter is used.
+ @param fetch_cost Cost of finding the row, without where compare cost
+ @param index_only_cost Cost of fetching '*records_arg' key values
+ @param prev_records Number of record combinations in previous tables
+
+ @return 'this' Filter is used (and variables are updated)
+ @return 0 Filter is worse than old plan
*/
-double adjust_quick_cost(double quick_cost, ha_rows records)
+Range_rowid_filter_cost_info* Range_rowid_filter_cost_info::
+apply_filter(THD *thd, TABLE *table, ALL_READ_COST *cost,
+ double *records_arg,
+ double *startup_cost,
+ uint ranges, double prev_records)
{
- double cost= (quick_cost - MULTI_RANGE_READ_SETUP_COST -
- rows2double(records)/TIME_FOR_COMPARE);
- DBUG_ASSERT(cost > 0.0);
- return cost;
+ handler *file= table->file;
+ bool use_filter;
+ double new_cost, org_cost, records= *records_arg, new_records;
+ double filter_startup_cost= get_setup_cost();
+ double filter_lookup_cost= records * lookup_cost();
+ double tmp;
+ ALL_READ_COST adjusted_cost;
+
+ /*
+ Calculate number of resulting rows after filtering
+    Here we trust the selectivity and do not adjust rows upwards even if
+    the end result is low. This means that new_records is allowed to
+    be < 1.0
+ */
+ new_records= records * selectivity;
+
+ /*
+    Calculate the cost of the filter based on the fact that we originally
+    had 'records' rows and that after the filter only 'new_records' rows
+    are accepted.
+    Note that for the rejected rows we have only done a key read. We only
+    fetch the row and evaluate the WHERE clause if the filter accepts the
+    row id.
+    In case of an index only read, the fetch cost and the index only cost
+    are the same. Even in this case the filter can give a better plan as
+    we have to do fewer comparisons with the WHERE clause.
+
+ The io_cost is used to take into account that we have to do 1 key
+ lookup to find the first matching key in each range.
+ */
+
+ adjusted_cost= *cost;
+ /* We are going to read 'selectivity' fewer rows */
+ adjusted_cost.row_cost.io*= selectivity;
+ adjusted_cost.row_cost.cpu*= selectivity;
+ adjusted_cost.copy_cost*= selectivity; // Cost of copying row or key
+ adjusted_cost.index_cost.cpu+= filter_lookup_cost;
+
+ tmp= prev_records * WHERE_COST_THD(thd);
+ org_cost= (file->cost_for_reading_multiple_times(prev_records,
+ cost) +
+ records * tmp);
+
+ new_cost= (file->cost_for_reading_multiple_times(prev_records,
+ &adjusted_cost) +
+ new_records * tmp + filter_startup_cost);
+
+ DBUG_ASSERT(new_cost >= 0 && new_records >= 0);
+ use_filter= new_cost < org_cost;
+
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_filter(thd, "filter");
+ trace_filter.add("rowid_filter_index",
+ table->key_info[get_key_no()].name).
+ add("index_only_cost", file->cost(cost->index_cost)).
+ add("filter_startup_cost", filter_startup_cost).
+ add("find_key_and_filter_lookup_cost", filter_lookup_cost).
+ add("filter_selectivity", selectivity).
+ add("original_rows", records).
+ add("new_rows", new_records).
+ add("original_access_cost", file->cost(cost)).
+ add("with_filter_access_cost", file->cost(&adjusted_cost)).
+ add("original_found_rows_cost", file->cost(cost->row_cost)).
+ add("with_filter_found_rows_cost", file->cost(adjusted_cost.row_cost)).
+ add("org_cost", org_cost).
+ add("filter_cost", new_cost).
+ add("filter_used", use_filter);
+ }
+ if (use_filter)
+ {
+ cost->row_cost= adjusted_cost.row_cost;
+ cost->index_cost= adjusted_cost.index_cost;
+ cost->copy_cost= adjusted_cost.copy_cost;
+ *records_arg= new_records;
+ (*startup_cost)+= filter_startup_cost;
+ return this;
+ }
+ return 0;
}
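
A worked, heavily simplified sketch of the accept/reject decision above, assuming made-up constants and reducing cost_for_reading_multiple_times() to a plain multiplication: rejected rows only pay the index read plus the filter probe, and the WHERE clause is evaluated only for the rows that pass. The sketch_use_filter name and the numbers are illustrative only.

#include <cstdio>

/* Hypothetical per-row constants; the server derives these from its cost model */
static const double WHERE_COST= 0.032, FILTER_LOOKUP_COST= 0.011;

static bool sketch_use_filter(double records, double selectivity,
                              double index_cost, double row_cost,
                              double filter_startup_cost,
                              double prev_records)
{
  double new_records= records * selectivity;
  /* Every candidate row still pays the index read and one filter probe */
  double adj_index= index_cost + records * FILTER_LOOKUP_COST;
  /* Only the accepted fraction pays the row fetch */
  double adj_row=   row_cost * selectivity;
  double where= prev_records * WHERE_COST;
  double org_cost= prev_records * (index_cost + row_cost) + records * where;
  double new_cost= (prev_records * (adj_index + adj_row) +
                    new_records * where + filter_startup_cost);
  return new_cost < org_cost;
}

int main()
{
  /* 10000 candidate rows, filter keeps 5%, one prior row combination */
  printf("filter pays off: %s\n",
         sketch_use_filter(10000, 0.05, 4.0, 12.0, 1.5, 1.0) ? "yes" : "no");
  return 0;
}
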
@@ -7817,7 +8063,7 @@ double adjust_quick_cost(double quick_cost, ha_rows records)
The function finds the best access path to table 's' from the passed
partial plan where an access path is the general term for any means to
- access the data in 's'. An access path may use either an index or a scan,
+  access the data in 's'. An access path may use either an index or a scan,
whichever is cheaper. The input partial plan is passed via the array
'join->positions' of length 'idx'. The chosen access method for 's' and its
cost are stored in 'join->positions[idx]'.
@@ -7839,6 +8085,28 @@ double adjust_quick_cost(double quick_cost, ha_rows records)
None
*/
+struct best_plan
+{
+ double cost; // Smallest cost found
+ double records; // Old 'Records'
+ double records_read; // Records accessed
+  double records_after_filter;          // records_read adjusted for the best filter
+ double records_out; // Smallest record count seen
+  double prev_record_reads;             // Saved result of prev_record_reads()
+  double identical_keys;                // Identical-keys estimate from prev_record_reads()
+ Range_rowid_filter_cost_info *filter; // Best filter
+ KEYUSE *key; // Best key
+ SplM_plan_info *spl_plan;
+ table_map ref_depends_map;
+ ulonglong refills; // Join cache refills
+ enum join_type type;
+ uint forced_index;
+ uint max_key_part;
+ table_map found_ref;
+ bool use_join_buffer;
+};
+
+
void
best_access_path(JOIN *join,
JOIN_TAB *s,
@@ -7851,15 +8119,11 @@ best_access_path(JOIN *join,
POSITION *loose_scan_pos)
{
THD *thd= join->thd;
- uint use_cond_selectivity= thd->variables.optimizer_use_condition_selectivity;
- KEYUSE *best_key= 0;
- uint best_max_key_part= 0;
+ uint use_cond_selectivity=
+ thd->variables.optimizer_use_condition_selectivity;
+ TABLE *table= s->table;
+ handler *file= table->file;
my_bool found_constraint= 0;
- double best= DBL_MAX;
- double best_time= DBL_MAX;
- double records= DBL_MAX;
- ha_rows records_for_key= 0;
- table_map best_ref_depends_map= 0;
/*
key_dependent is 0 if all key parts could be used or if there was an
EQ_REF table found (which uses all key parts). In other words, we cannot
@@ -7867,24 +8131,43 @@ best_access_path(JOIN *join,
Otherwise it's a bitmap of tables that could improve key usage.
*/
table_map key_dependent= 0;
- Range_rowid_filter_cost_info *best_filter= 0;
- double tmp;
- double keyread_tmp= 0;
+ ALL_READ_COST tmp;
ha_rows rec;
- bool best_uses_jbuf= FALSE;
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
- SplM_plan_info *spl_plan= 0;
- Range_rowid_filter_cost_info *filter= 0;
- const char* cause= NULL;
- enum join_type best_type= JT_UNKNOWN, type= JT_UNKNOWN;
-
- disable_jbuf= disable_jbuf || idx == join->const_tables;
-
Loose_scan_opt loose_scan_opt;
+ struct best_plan best;
+ Json_writer_object trace_wrapper(thd, "best_access_path");
DBUG_ENTER("best_access_path");
- Json_writer_object trace_wrapper(thd, "best_access_path");
+ /*
+ Assume that there is at least one accepted row from previous table
+ combinations.
+    This fixes a problem when the estimated selectivity of the preceding
+    table combinations is so small that record_count becomes << 1.0,
+    which makes the cost for the current table so low that it does not
+    matter when calculating the best plan.
+ */
+ set_if_bigger(record_count, 1.0);
+
+ best.cost= DBL_MAX;
+ best.records= DBL_MAX;
+ best.records_read= DBL_MAX;
+ best.records_after_filter= DBL_MAX;
+ best.records_out= table->stat_records() * table->cond_selectivity;
+ best.prev_record_reads= best.identical_keys= 0;
+ best.filter= 0;
+ best.key= 0;
+ best.max_key_part= 0;
+ best.type= JT_UNKNOWN;
+ best.forced_index= MAX_KEY;
+ best.found_ref= 0;
+ best.ref_depends_map= 0;
+ best.refills= 0;
+ best.use_join_buffer= FALSE;
+ best.spl_plan= 0;
+
+ disable_jbuf= disable_jbuf || idx == join->const_tables;
trace_wrapper.add_table_name(s);
@@ -7892,36 +8175,48 @@ best_access_path(JOIN *join,
loose_scan_opt.init(join, s, remaining_tables);
- if (s->table->is_splittable())
- spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+ if (table->is_splittable())
+ best.spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object info(thd, "plan_details");
+ info.add("record_count", record_count);
+ }
Json_writer_array trace_paths(thd, "considered_access_paths");
if (s->keyuse)
{ /* Use key if possible */
- KEYUSE *keyuse;
- KEYUSE *start_key=0;
- TABLE *table= s->table;
- double best_records= DBL_MAX;
+ KEYUSE *keyuse, *start_key= 0;
uint max_key_part=0;
+ enum join_type type= JT_UNKNOWN;
+ double cur_cost, copy_cost, cached_prev_record_reads= 0.0;
+ table_map cached_prev_ref= ~(table_map) 0;
/* Test how we can use keys */
rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; // Assumed records/key
for (keyuse=s->keyuse ; keyuse->table == table ;)
{
KEY *keyinfo;
+ const char *cause= NULL;
ulong key_flags;
uint key_parts;
key_part_map found_part= 0;
- key_part_map notnull_part=0; // key parts which won't have NULL in lookup tuple.
+ /* key parts which won't have NULL in lookup tuple */
+ key_part_map notnull_part=0;
table_map found_ref= 0;
uint key= keyuse->key;
- filter= 0;
bool ft_key= (keyuse->keypart == FT_KEYPART);
/* Bitmap of keyparts where the ref access is over 'keypart=const': */
key_part_map const_part= 0;
/* The or-null keypart in ref-or-null access: */
key_part_map ref_or_null_part= 0;
key_part_map all_parts= 0;
+ double startup_cost= s->startup_cost;
+ double records_after_filter, records_best_filter, records;
+ Range_rowid_filter_cost_info *filter= 0;
+ double prev_record_count= record_count;
+ double identical_keys= 0;
if (is_hash_join_key_no(key))
{
@@ -7960,9 +8255,10 @@ best_access_path(JOIN *join,
do /* For each way to access the keypart */
{
/*
- if 1. expression doesn't refer to forward tables
+ If 1. expression does not refer to forward tables
2. we won't get two ref-or-null's
*/
+ double ignore;
all_parts|= keyuse->keypart_map;
if (!(remaining_tables & keyuse->used_tables) &&
(!keyuse->validity_ref || *keyuse->validity_ref) &&
@@ -7979,12 +8275,19 @@ best_access_path(JOIN *join,
if (!keyuse->val->maybe_null() || keyuse->null_rejecting)
notnull_part|=keyuse->keypart_map;
- double tmp2= prev_record_reads(join_positions, idx,
- (found_ref | keyuse->used_tables));
- if (tmp2 < best_prev_record_reads)
+ if ((found_ref | keyuse->used_tables) != cached_prev_ref)
{
- best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
- best_prev_record_reads= tmp2;
+ cached_prev_ref= (found_ref | keyuse->used_tables);
+ cached_prev_record_reads=
+ prev_record_reads(join_positions, idx,
+ cached_prev_ref, record_count,
+ &ignore);
+ }
+ if (cached_prev_record_reads < best_prev_record_reads)
+ {
+ best_prev_record_reads= cached_prev_record_reads;
+ best_part_found_ref= (keyuse->used_tables &
+ ~join->const_table_map);
}
if (rec > keyuse->ref_table_rows)
rec= keyuse->ref_table_rows;
@@ -7994,6 +8297,16 @@ best_access_path(JOIN *join,
*/
if (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL)
ref_or_null_part |= keyuse->keypart_map;
+
+ /*
+ Remember if there is a WHERE condition that contains
+ 'key_part=expression_with_only_accessible_tables'
+ We ignore const tables as these are handled by selectivity
+ code (const table fields are treated as constants).
+ */
+ found_constraint|= (keyuse->used_tables &
+ ~(remaining_tables |
+ join->const_table_map));
}
else if (!(found_part & keyuse->keypart_map))
key_parts_dependent|= keyuse->used_tables;
@@ -8006,6 +8319,7 @@ best_access_path(JOIN *join,
if (all_parts & 1)
key_dependent|= key_parts_dependent;
found_ref|= best_part_found_ref;
+ /* Remember if the key expression used previous non const tables */
} while (keyuse->table == table && keyuse->key == key);
/*
@@ -8018,25 +8332,42 @@ best_access_path(JOIN *join,
rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables
Json_writer_object trace_access_idx(thd);
- double eq_ref_rows= 0.0, eq_ref_cost= 0.0;
/*
full text keys require special treatment
*/
if (ft_key)
{
/*
- Really, there should be records=0.0 (yes!)
- but 1.0 would be probably safer
+        Fulltext index access is performed the following way:
+        - In the prepare step the search is performed, all positions are
+          collected in an array and the array is sorted.
+        - If the optimizer decides to use the ft index access method, it
+          simply returns positions from the array one by one.
+        - If the optimizer decides to use something else (another index, a
+          table scan), then it will use binary search in the array to find
+          the position.
+
+        The following code sets the cost very low as the prepare step will
+        always be done and the cost of fetching the row from memory
+        is very small.
+ Alternatively we could use the cost of an EQ_REF here.
+ */
+ tmp.reset();
+ tmp.row_cost.cpu= file->ROW_COPY_COST;
+ /*
+        We don't know how many records will match. However, we want to have
+        the fulltext search done early, so we set the number of records
+        to a very low value.
*/
- tmp= prev_record_reads(join_positions, idx, found_ref);
records= 1.0;
type= JT_FT;
- trace_access_idx.add("access_type", join_type_str[type])
- .add("full-text index", keyinfo->name);
+ if (unlikely(trace_access_idx.trace_started()))
+ trace_access_idx.
+ add("access_type", join_type_str[type]).
+ add("full-text index", keyinfo->name);
}
else
{
- found_constraint= MY_TEST(found_part);
loose_scan_opt.check_ref_access_part1(s, key, start_key, found_part);
/* Check if we found full key */
@@ -8050,31 +8381,42 @@ best_access_path(JOIN *join,
- equalities we are using reject NULLs (3)
then the estimate is rows=1.
*/
- if ((key_flags & (HA_NOSAME | HA_EXT_NOSAME)) && // (1)
+ if ((key_flags & (HA_NOSAME | HA_EXT_NOSAME)) && // (1)
(!(key_flags & HA_NULL_PART_KEY) || // (2)
all_key_parts == notnull_part)) // (3)
{
/* Check that eq_ref_tables are correctly updated */
DBUG_ASSERT(join->eq_ref_tables & table->map);
- /* TODO: Adjust cost for covering and clustering key */
type= JT_EQ_REF;
- trace_access_idx.add("access_type", join_type_str[type])
- .add("index", keyinfo->name);
+ if (unlikely(trace_access_idx.trace_started()))
+ trace_access_idx.
+ add("access_type", join_type_str[type]).
+ add("index", keyinfo->name);
if (!found_ref && table->opt_range_keys.is_set(key))
- tmp= adjust_quick_cost(table->opt_range[key].cost, 1);
+ {
+ /* Ensure that the cost is identical to the range cost */
+ table->opt_range[key].get_costs(&tmp);
+ }
else
- tmp= table->file->avg_io_cost();
- eq_ref_rows= prev_record_reads(join_positions, idx,
- found_ref);
- tmp*= eq_ref_rows;
- eq_ref_cost= tmp;
- records=1.0;
+ {
+ tmp= cost_for_index_read(thd, table, key, 1, 1);
+ }
+ /*
+ Calculate how many record read calls will be made taking
+ into account that we will cache the last read row.
+ */
+ prev_record_count= prev_record_reads(join_positions, idx,
+ found_ref, record_count,
+ &identical_keys);
+ records= 1.0;
}
else
{
type= JT_REF;
- trace_access_idx.add("access_type", join_type_str[type])
- .add("index", keyinfo->name);
+ if (unlikely(trace_access_idx.trace_started()))
+ trace_access_idx.
+ add("access_type", join_type_str[type]).
+ add("index", keyinfo->name);
if (!found_ref)
{ /* We found a const key */
/*
@@ -8096,19 +8438,19 @@ best_access_path(JOIN *join,
*/
if (table->opt_range_keys.is_set(key))
{
+ /* Ensure that the cost is identical to the range cost */
records= (double) table->opt_range[key].rows;
trace_access_idx.add("used_range_estimates", true);
- tmp= adjust_quick_cost(table->opt_range[key].cost,
- table->opt_range[key].rows);
- goto got_cost;
- }
- else
- {
- /* quick_range couldn't use key! */
- records= (double) s->records/rec;
- trace_access_idx.add("used_range_estimates", false)
- .add("reason", "not available");
+
+ table->opt_range[key].get_costs(&tmp);
+ goto got_cost2;
}
+ /* quick_range couldn't use key! */
+ records= (double) s->records/rec;
+ if (unlikely(trace_access_idx.trace_started()))
+ trace_access_idx.
+ add("used_range_estimates", false).
+ add("reason", "not available");
}
else
{
@@ -8120,9 +8462,11 @@ best_access_path(JOIN *join,
(1.0 +
((double) (table->s->max_key_length-keyinfo->key_length) /
(double) table->s->max_key_length)));
- if (records < 2.0)
- records=2.0; /* Can't be as good as a unique */
+ set_if_smaller(records, (double)s->records);
+ if (records < 1.0)
+ records= 1.0; /* Can't be as good as a unique */
}
+
/*
ReuseRangeEstimateForRef-2: We get here if we could not reuse
E(#rows) from range optimizer. Make another try:
@@ -8143,44 +8487,46 @@ best_access_path(JOIN *join,
records= (double) table->opt_range[key].rows;
trace_access_idx.add("used_range_estimates", "clipped down");
}
- else
+ else if (unlikely(trace_access_idx.trace_started()))
{
- trace_access_idx.add("used_range_estimates", false);
if (table->opt_range_keys.is_set(key))
{
- trace_access_idx.add("reason", "not better than ref estimates");
+ trace_access_idx.
+ add("used_range_estimates",false).
+ add("reason", "not better than ref estimates");
}
else
{
- trace_access_idx.add("reason", "not available");
+ trace_access_idx.
+ add("used_range_estimates", false).
+ add("reason", "not available");
}
}
}
- /* Limit the number of matched rows */
- tmp= cost_for_index_read(thd, table, key, (ha_rows) records,
- (ha_rows) s->worst_seeks);
- records_for_key= (ha_rows) records;
- set_if_smaller(records_for_key, thd->variables.max_seeks_for_key);
- keyread_tmp= table->file->keyread_time(key, 1, records_for_key);
- got_cost:
- tmp= COST_MULT(tmp, record_count);
- keyread_tmp= COST_MULT(keyread_tmp, record_count);
+ /* Calculate the cost of the index access */
+ tmp= cost_for_index_read(thd, table, key,
+ (ha_rows) records, 0);
}
}
else
{
type = ref_or_null_part ? JT_REF_OR_NULL : JT_REF;
- trace_access_idx.add("access_type", join_type_str[type])
- .add("index", keyinfo->name);
+ if (unlikely(trace_access_idx.trace_started()))
+ trace_access_idx.
+ add("access_type", join_type_str[type]).
+ add("index", keyinfo->name);
/*
Use as much key-parts as possible and a uniq key is better
than a not unique key
- Set tmp to (previous record count) * (records / combination)
+        Set tmp to the cost of accessing the expected number of
+ records.
*/
if ((found_part & 1) &&
- (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) ||
+ (!(table->key_info[key].index_flags & HA_ONLY_WHOLE_INDEX) ||
found_part == PREV_BITS(uint,keyinfo->user_defined_key_parts)))
{
+ double extra_cost= 0;
+
max_key_part= max_part_bit(found_part);
/*
ReuseRangeEstimateForRef-3:
@@ -8206,7 +8552,7 @@ best_access_path(JOIN *join,
We also have a property that "range optimizer produces equal or
tighter set of scan intervals than ref(const) optimizer". Each
of the intervals in (**) are "tightest possible" intervals when
- one limits itself to using keyparts 1..K (which we do in #2).
+ one limits itself to using keyparts 1..K (which we do in #2).
From here it follows that range access used either one, or
both of the (I1) and (I2) intervals:
@@ -8223,11 +8569,15 @@ best_access_path(JOIN *join,
*/
if (table->opt_range_keys.is_set(key) && !found_ref && //(C1)
table->opt_range[key].key_parts == max_key_part && //(C2)
- table->opt_range[key].ranges == 1 + MY_TEST(ref_or_null_part)) //(C3)
+ (table->opt_range[key].ranges ==
+ 1 + MY_TEST(ref_or_null_part))) //(C3)
{
records= (double) table->opt_range[key].rows;
- tmp= adjust_quick_cost(table->opt_range[key].cost,
- table->opt_range[key].rows);
+ table->opt_range[key].get_costs(&tmp);
+ /*
+ TODO: Disable opt_range testing below for this range as we can
+ always use this ref instead.
+ */
trace_access_idx.add("used_range_estimates", true);
goto got_cost2;
}
@@ -8254,16 +8604,34 @@ best_access_path(JOIN *join,
*/
if (table->opt_range_keys.is_set(key))
{
+ double rows;
if (table->opt_range[key].key_parts >= max_key_part) // (2)
{
- double rows= (double) table->opt_range[key].rows;
- if (!found_ref && // (1)
- records < rows) // (3)
+ /*
+                Choose range over REF in the case the range will always be
+                as good as or better than REF.
+                This is the case when we have only one const range
+                and it consists of more parts than what we used for REF.
+ */
+ if (!found_ref &&
+ table->opt_range[key].key_parts > max_key_part &&
+ table->opt_range[key].ranges <=
+ (uint) (1 + MY_TEST(ref_or_null_part)))
{
- trace_access_idx.add("used_range_estimates", "clipped up");
- records= rows;
+ trace_access_idx.
+ add("chosen", false).
+ add("cause", "range is simple and more selective");
+ continue; // continue with next key
}
}
+ rows= (double) table->opt_range[key].rows;
+ if (!found_ref && // (1)
+ records < rows) // (3)
+ {
+ trace_access_idx.add("used_range_estimates",
+ "clipped up");
+ records= rows;
+ }
}
}
else
@@ -8300,8 +8668,8 @@ best_access_path(JOIN *join,
a*keyinfo->user_defined_key_parts - rec_per_key)/
(keyinfo->user_defined_key_parts-1);
else
- records= a;
- set_if_bigger(records, 1.0);
+ records= rows2double(s->records);
+ set_if_bigger(records, MIN_ROWS_AFTER_FILTERING);
}
}
@@ -8309,6 +8677,7 @@ best_access_path(JOIN *join,
{
/* We need to do two key searches to find row */
records *= 2.0;
+ extra_cost= s->table->file->KEY_LOOKUP_COST;
}
/*
@@ -8337,56 +8706,39 @@ best_access_path(JOIN *join,
}
}
- /* Limit the number of matched rows */
- tmp= cost_for_index_read(thd, table, key, (ha_rows) records,
- (ha_rows) s->worst_seeks);
- records_for_key= (ha_rows) records;
- set_if_smaller(records_for_key, thd->variables.max_seeks_for_key);
- keyread_tmp= table->file->keyread_time(key, 1, records_for_key);
- got_cost2:
- tmp= COST_MULT(tmp, record_count);
- keyread_tmp= COST_MULT(keyread_tmp, record_count);
+ set_if_smaller(records, (double) s->records);
+ tmp= cost_for_index_read(thd, table, key, (ha_rows)records, 0);
+ tmp.copy_cost+= extra_cost;
}
else
{
if (!(found_part & 1))
cause= "no predicate for first keypart";
- tmp= best_time; // Do nothing
+ else
+ cause= "No full key found";
+ trace_access_idx.add("chosen", false).add("cause", cause);
+ continue;
}
}
- tmp= COST_ADD(tmp, s->startup_cost);
- loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp,
+ got_cost2:
+ loose_scan_opt.check_ref_access_part2(key, start_key, records,
+ file->cost(&tmp) + startup_cost,
found_ref);
} /* not ft_key */
- if (records < DBL_MAX &&
- (found_part & 1) && // start_key->key can be used for index access
- (table->file->index_flags(start_key->key,0,1) &
- HA_DO_RANGE_FILTER_PUSHDOWN))
+ if (records == DBL_MAX) // Key not usable
+ continue;
+
+ records_best_filter= records_after_filter= records;
+
+ /*
+ Check if we can use a filter.
+ Records can be 0 in case of empty tables.
+ */
+ if ((found_part & 1) && records &&
+ table->can_use_rowid_filter(start_key->key))
{
- double rows;
- if (type == JT_EQ_REF)
- {
- /*
- Treat EQ_REF access in a special way:
- 1. We have no cost for index-only read. Assume its cost is 50% of
- the cost of the full read.
-
- 2. A regular ref access will do #record_count lookups, but eq_ref
- has "lookup cache" which reduces the number of lookups made.
- The estimation code uses prev_record_reads() call to estimate:
-
- tmp = prev_record_reads(join_positions, idx, found_ref);
-
- Set the effective number of rows from "tmp" here.
- */
- keyread_tmp= COST_ADD(eq_ref_cost / 2, s->startup_cost);
- rows= eq_ref_rows;
- }
- else
- rows= record_count * records;
-
/*
If we use filter F with selectivity s the the cost of fetching data
by key using this filter will be
@@ -8407,7 +8759,6 @@ best_access_path(JOIN *join,
Here we have:
cost_of_fetching_1_row = tmp/rows
cost_of_fetching_1_key_tuple = keyread_tmp/rows
-
access_cost_factor is the gain we expect for using rowid filter.
An access_cost_factor of 1.0 means that keyread_tmp is 0
(using key read is infinitely fast) and the gain for each row when
@@ -8428,50 +8779,89 @@ best_access_path(JOIN *join,
we cannot use filters as the cost calculation below would cause
tmp to become negative. The future resultion is to not limit
cost with worst_seek.
- */
- double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0);
- if (!(records < s->worst_seeks &&
- records <= thd->variables.max_seeks_for_key))
- trace_access_idx.add("rowid_filter_skipped", "worst/max seeks clipping");
- else if (access_cost_factor <= 0.0)
- trace_access_idx.add("rowid_filter_skipped", "cost_factor <= 0");
- else
- {
- filter=
- table->best_range_rowid_filter_for_partial_join(start_key->key,
- rows,
- access_cost_factor);
- if (filter)
- {
- tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows);
- DBUG_ASSERT(tmp >= 0);
- trace_access_idx.add("rowid_filter_key",
- table->key_info[filter->key_no].name);
- }
- }
+
+        We cannot use a filter with JT_EQ_REF as in this case 'tmp' is the
+        number of rows from prev_record_reads() and keyread_tmp is 0. These
+        numbers are not usable with the rowid filter code.
+ */
+ filter= table->best_range_rowid_filter(start_key->key,
+ records,
+ file->cost(&tmp),
+ file->cost(tmp.index_cost),
+ prev_record_count,
+ &records_best_filter);
+ set_if_smaller(best.records_out, records_best_filter);
+ if (filter)
+ filter= filter->apply_filter(thd, table, &tmp,
+ &records_after_filter,
+ &startup_cost,
+ 1, prev_record_count);
}
- trace_access_idx.add("rows", records).add("cost", tmp);
- if (tmp + 0.0001 < best_time - records/TIME_FOR_COMPARE)
+ /*
+ Take into account WHERE and setup cost.
+ We have to check the WHERE for all previous row combinations
+ (record_count).
+ 'prev_record_count' is either 'record_count', or in case of
+ EQ_REF the estimated number of index_read() calls to the
+ engine when taking the one row read cache into account.
+ */
+ copy_cost= (record_count * records_after_filter * WHERE_COST_THD(thd) +
+ startup_cost);
+
+ cur_cost= (file->cost_for_reading_multiple_times(prev_record_count, &tmp) +
+ copy_cost);
+
+ if (unlikely(trace_access_idx.trace_started()))
{
- trace_access_idx.add("chosen", true);
- best_time= COST_ADD(tmp, records/TIME_FOR_COMPARE);
- best= tmp;
- best_records= records;
- best_key= start_key;
- best_max_key_part= max_key_part;
- best_ref_depends_map= found_ref;
- best_filter= filter;
- best_type= type;
+ if (prev_record_count != record_count)
+ trace_access_idx.add("prev_record_count", prev_record_count);
+ trace_access_idx.
+ add("rows", records_after_filter).
+ add("cost", cur_cost);
}
- else
+
+ /*
+ The COST_EPS is here to ensure we use the first key if there are
+ two 'identical keys' that could be used.
+ */
+ if (cur_cost + COST_EPS < best.cost)
{
- trace_access_idx.add("chosen", false)
- .add("cause", cause ? cause : "cost");
- }
- cause= nullptr;
+ trace_access_idx.add("chosen", true);
+ best.cost= cur_cost;
+ /*
+ We use 'records' instead of 'records_after_filter' here as we want
+ to have EXPLAIN print the number of rows found by the key access.
+ */
+ best.records= records; // Records before filter!
+ best.records_read= records;
+ /*
+ If we are using 'use_cond_selectivity > 1' then
+ table_after_join_selectivity() may take into account other
+        filters than what is currently used, so we have to use
+        records_after_filter. If 'use_cond_selectivity <= 1' then we
+ can use information from the best filter.
+ */
+ best.records_after_filter= ((use_cond_selectivity > 1) ?
+ records_after_filter :
+ records_best_filter);
+ best.prev_record_reads= prev_record_count;
+ best.identical_keys= identical_keys;
+ best.key= start_key;
+ best.found_ref= found_ref;
+ best.max_key_part= max_key_part;
+ best.ref_depends_map= found_ref;
+ best.filter= filter;
+ best.type= type;
+ }
+ else if (unlikely(thd->trace_started()))
+ {
+ trace_access_idx.
+ add("chosen", false).
+ add("cause", cause ? cause : "cost");
+ }
+ set_if_smaller(best.records_out, records);
} /* for each key */
- records= best_records;
}
else
{
@@ -8490,15 +8880,19 @@ best_access_path(JOIN *join,
*/
if (s->key_start_dependent)
key_dependent= s->key_dependent;
- /* Add dependencey for sub queries */
+
+ /* Add dependency for sub queries */
key_dependent|= s->embedded_dependent;
- }
+
+ } /* if (s->keyuse) */
+
+
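
To make the per-key comparison in the loop above concrete, here is a minimal standalone sketch, not part of the patch: cost_for_reading_multiple_times() is reduced to a plain multiplication, the WHERE_COST and COST_EPS values are assumed, and KeyCandidate/sketch_pick_key are hypothetical names. It mirrors how cur_cost is assembled from the read cost plus record_count * rows * WHERE_COST, and how COST_EPS keeps the first of two identical keys.

#include <cfloat>
#include <cstdio>

static const double WHERE_COST= 0.032, COST_EPS= 1e-9;  /* assumed values */

struct KeyCandidate { const char *name; double read_cost, rows_after_filter; };

static const KeyCandidate *sketch_pick_key(const KeyCandidate *keys, int n,
                                           double record_count,
                                           double startup_cost)
{
  const KeyCandidate *best= 0;
  double best_cost= DBL_MAX;
  for (int i= 0; i < n; i++)
  {
    /* WHERE is checked for every surviving row of every row combination */
    double copy_cost= (record_count * keys[i].rows_after_filter * WHERE_COST +
                       startup_cost);
    double cur_cost= record_count * keys[i].read_cost + copy_cost;
    /* COST_EPS makes an exact tie keep the earlier key */
    if (cur_cost + COST_EPS < best_cost)
    {
      best_cost= cur_cost;
      best= &keys[i];
    }
  }
  return best;
}

int main()
{
  KeyCandidate keys[]= { {"k1", 25.0, 120}, {"k2", 25.0, 120}, {"k3", 40.0, 60} };
  printf("chosen: %s\n", sketch_pick_key(keys, 3, 10.0, 0.0)->name);
  return 0;
}
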
/* Check that s->key_dependent contains all used_tables found in s->keyuse */
key_dependent&= ~PSEUDO_TABLE_BITS;
DBUG_ASSERT((key_dependent & (s->key_dependent | s->embedded_dependent)) ==
key_dependent);
- /*
+ /*
If there is no key to access the table, but there is an equi-join
predicate connecting the table with the privious tables then we
consider the possibility of using hash join.
@@ -8506,46 +8900,81 @@ best_access_path(JOIN *join,
(1) s is inner table of semi-join -> join cache is allowed for semijoins
(2) s is inner table of outer join -> join cache is allowed for outer joins
*/
- if (idx > join->const_tables && best_key == 0 &&
+ if (idx > join->const_tables && best.key == 0 &&
(join->allowed_join_cache_types & JOIN_CACHE_HASHED_BIT) &&
join->max_allowed_join_cache_level > 2 &&
!bitmap_is_clear_all(eq_join_set) && !disable_jbuf &&
(!s->emb_sj_nest ||
join->allowed_semijoin_with_cache) && // (1)
- (!(s->table->map & join->outer_join) ||
+ (!(table->map & join->outer_join) ||
join->allowed_outer_join_with_cache)) // (2)
{
- double join_sel= 0.1;
- /* Estimate the cost of the hash join access to the table */
- double rnd_records= matching_candidates_in_table(s, found_constraint,
- use_cond_selectivity);
+ double refills, row_copy_cost, cmp_time, cur_cost, records_table_filter;
+ /* Estimate the cost of the hash join access to the table */
+ double rnd_records= apply_selectivity_for_table(s, use_cond_selectivity);
+ records_table_filter= ((found_constraint) ?
+ use_found_constraint(rnd_records) :
+ rnd_records);
+
+ DBUG_ASSERT(rnd_records <= rows2double(s->found_records) + 0.5);
+ set_if_smaller(best.records_out, records_table_filter);
- tmp= s->quick ? s->quick->read_time : s->scan_time();
- double cmp_time= (s->records - rnd_records)/TIME_FOR_COMPARE;
- tmp= COST_ADD(tmp, cmp_time);
+ /*
+ The following cost calculation is identical to the cost calculation for
+ the join cache later on, except for the HASH_FANOUT
+ */
+ if (s->quick)
+ {
+ /*
+ Cost of reading rows through opt_range including comparing the rows
+ with the attached WHERE clause.
+ */
+ cur_cost= s->quick->read_time;
+ }
+ else
+ cur_cost= s->cached_scan_and_compare_time;
/* We read the table as many times as join buffer becomes full. */
+ refills= (1.0 + floor((double) cache_record_length(join,idx) *
+ record_count /
+ (double) thd->variables.join_buff_size));
+ cur_cost= COST_MULT(cur_cost, refills);
- double refills= (1.0 + floor((double) cache_record_length(join,idx) *
- record_count /
- (double) thd->variables.join_buff_size));
- tmp= COST_MULT(tmp, refills);
- best_time= COST_ADD(tmp,
- COST_MULT((record_count*join_sel) / TIME_FOR_COMPARE,
- rnd_records));
- best= tmp;
- records= rnd_records;
- best_key= hj_start_key;
- best_ref_depends_map= 0;
- best_uses_jbuf= TRUE;
- best_filter= 0;
- best_type= JT_HASH;
+ /*
+ Cost of doing the hash lookup and check all matching rows with the
+ WHERE clause.
+ We assume here that, thanks to the hash, we don't have to compare all
+        row combinations, only a HASH_FANOUT (10%) fraction of the rows in
+        the cache.
+ */
+ row_copy_cost= (ROW_COPY_COST_THD(thd) *
+ JOIN_CACHE_ROW_COPY_COST_FACTOR(thd));
+ cmp_time= (record_count * row_copy_cost +
+ rnd_records * record_count * HASH_FANOUT *
+ ((idx - join->const_tables) * row_copy_cost +
+ WHERE_COST_THD(thd)));
+ cur_cost= COST_ADD(cur_cost, cmp_time);
+
+ best.cost= cur_cost;
+ best.records_read= best.records_after_filter= rows2double(s->records);
+ best.records= rnd_records;
+#ifdef NOT_YET
+ set_if_smaller(best.records_out, rnd_records * HASH_FANOUT);
+#endif
+ best.key= hj_start_key;
+ best.ref_depends_map= 0;
+ best.use_join_buffer= TRUE;
+ best.filter= 0;
+ best.type= JT_HASH;
+ best.refills= (ulonglong) ceil(refills);
Json_writer_object trace_access_hash(thd);
- trace_access_hash.add("type", "hash");
- trace_access_hash.add("index", "hj-key");
- trace_access_hash.add("rnd_records", rnd_records);
- trace_access_hash.add("cost", best);
- trace_access_hash.add("chosen", true);
+ if (unlikely(trace_access_hash.trace_started()))
+ trace_access_hash.
+ add("type", "hash").
+ add("index", "hj-key").
+ add("rows", rnd_records).
+ add("refills", refills).
+ add("cost", best.cost).
+ add("chosen", true);
}
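
The hash-join cost built above can be summarized by the sketch below; the constants (ROW_COPY_COST, JOIN_CACHE_COPY_FACTOR, WHERE_COST, HASH_FANOUT) are illustrative stand-ins for the server's cost-model values and the function is not part of the patch. The shape is: scan cost repeated once per join-buffer refill, plus copying rows into and out of the cache, plus checking only the HASH_FANOUT fraction of the row combinations against the WHERE clause.

#include <cmath>
#include <cstdio>

static const double ROW_COPY_COST= 0.00006, JOIN_CACHE_COPY_FACTOR= 0.5;
static const double WHERE_COST= 0.032, HASH_FANOUT= 0.1;  /* assumed values */

static double sketch_hash_join_cost(double scan_and_compare_cost,
                                    double record_count, double rnd_records,
                                    double cached_tables,
                                    double record_length, double join_buff_size)
{
  /* The table is re-scanned once per time the join buffer fills up */
  double refills= 1.0 + floor(record_length * record_count / join_buff_size);
  double cost= scan_and_compare_cost * refills;

  double row_copy= ROW_COPY_COST * JOIN_CACHE_COPY_FACTOR;
  /* Copy previous combinations in, then check only HASH_FANOUT of them */
  double cmp_time= (record_count * row_copy +
                    rnd_records * record_count * HASH_FANOUT *
                    (cached_tables * row_copy + WHERE_COST));
  return cost + cmp_time;
}

int main()
{
  printf("hash join cost: %.2f\n",
         sketch_hash_join_cost(50.0, 1e6, 1000.0, 2, 100, 262144));
  return 0;
}
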
/*
@@ -8584,19 +9013,27 @@ best_access_path(JOIN *join,
be used for cases with small datasets, which is annoying.
*/
Json_writer_object trace_access_scan(thd);
- if ((records >= s->found_records || best > s->read_time) && // (1)
- !(best_key && best_key->key == MAX_KEY) && // (2)
+ if ((best.records_read >= s->found_records ||
+ best.cost > s->read_time) && // (1)
+ !(best.key && best.key->key == MAX_KEY) && // (2)
!(s->quick &&
s->quick->get_type() != QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX && // (2)
- best_key && s->quick->index == best_key->key && // (2)
- best_max_key_part >= s->table->opt_range[best_key->key].key_parts) &&// (2)
- !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
- ! s->table->covering_keys.is_clear_all() && best_key && !s->quick) &&// (3)
- !(s->table->force_index && best_key && !s->quick) && // (4)
- !(best_key && s->table->pos_in_table_list->jtbm_subselect)) // (5)
+ best.key && s->quick->index == best.key->key && // (2)
+ best.max_key_part >= table->opt_range[best.key->key].key_parts) &&// (2)
+ !((file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
+ !table->covering_keys.is_clear_all() && best.key && !s->quick) &&// (3)
+ !(table->force_index_join && best.key && !s->quick) && // (4)
+ !(best.key && table->pos_in_table_list->jtbm_subselect)) // (5)
{ // Check full join
- double rnd_records= matching_candidates_in_table(s, found_constraint,
- use_cond_selectivity);
+ double records_after_filter, org_records;
+ double records_best_filter, cur_cost;
+ Range_rowid_filter_cost_info *filter= 0;
+ double startup_cost= s->startup_cost;
+ const char *scan_type= "";
+ enum join_type type;
+ uint forced_index= MAX_KEY;
+ bool force_plan= 0, use_join_buffer= 0;
+ ulonglong refills= 1;
/*
Range optimizer never proposes a RANGE if it isn't better
@@ -8604,171 +9041,333 @@ best_access_path(JOIN *join,
Here we estimate its cost.
*/
- filter= 0;
if (s->quick)
{
/*
For each record we:
- read record range through 'quick'
- skip rows which does not satisfy WHERE constraints
- TODO:
+
+ Note that s->quick->read_time includes the cost of comparing
+ the row with the where clause (WHERE_COST)
+
+ TODO:
We take into account possible use of join cache for ALL/index
access (see first else-branch below), but we don't take it into
account here for range/index_merge access. Find out why this is so.
*/
- double cmp_time= (s->found_records - rnd_records) / TIME_FOR_COMPARE;
- tmp= COST_MULT(record_count,
- COST_ADD(s->quick->read_time, cmp_time));
+ cur_cost= COST_MULT(s->quick->read_time, record_count);
- if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
+ /*
+ Use record count from range optimizer.
+ This is done to make records found comparable to what we get with
+ 'ref' access.
+ */
+ org_records= records_after_filter= rows2double(s->found_records);
+ records_best_filter= org_records;
+ set_if_smaller(best.records_out, records_best_filter);
+
+ if (s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
{
- double rows= record_count * s->found_records;
- double access_cost_factor= MY_MIN(tmp / rows, 1.0);
uint key_no= s->quick->index;
+ TABLE::OPT_RANGE *range= &table->opt_range[key_no];
- /* See the comment concerning using rowid filter for with ref access */
- keyread_tmp= s->table->opt_range[key_no].index_only_cost *
- record_count;
- access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0);
- if (access_cost_factor > 0.0)
+ /*
+            Ensure that 'range' and 's' are coming from the same source.
+            The complex 'double' comparison is there because floating point
+            register precision can cause small differences when costs are
+            calculated.
+ */
+ DBUG_ASSERT(range->rows == s->found_records);
+ DBUG_ASSERT((range->cost.total_cost() == 0.0 &&
+ s->quick->read_time == 0.0) ||
+ (range->cost.total_cost() / s->quick->read_time <= 1.0000001 &&
+ range->cost.total_cost() / s->quick->read_time >= 0.9999999));
+
+ range->get_costs(&tmp);
+ if (table->can_use_rowid_filter(key_no))
{
- filter=
- s->table->
- best_range_rowid_filter_for_partial_join(key_no, rows,
- access_cost_factor);
+ filter= table->best_range_rowid_filter(key_no,
+ rows2double(range->rows),
+ file->cost(&tmp),
+ file->cost(tmp.index_cost),
+ record_count,
+ &records_best_filter);
+ set_if_smaller(best.records_out, records_best_filter);
if (filter)
{
- tmp-= filter->get_adjusted_gain(rows);
- DBUG_ASSERT(tmp >= 0);
+ filter= filter->apply_filter(thd, table, &tmp,
+ &records_after_filter,
+ &startup_cost,
+ range->ranges,
+ record_count);
+ if (filter)
+ {
+ tmp.row_cost.cpu+= records_after_filter * WHERE_COST_THD(thd);
+ cur_cost= file->cost_for_reading_multiple_times(record_count,
+ &tmp);
+ cur_cost= COST_ADD(cur_cost, startup_cost);
+ startup_cost= 0; // Avoid adding it again later
+ table->opt_range[key_no].selectivity= filter->selectivity;
+ }
}
}
- else
- trace_access_scan.add("rowid_filter_skipped", "cost_factor <= 0");
-
+ if (best.key && key_no == best.key->key &&
+ !best.found_ref &&
+ best.max_key_part < table->opt_range[best.key->key].key_parts &&
+ table->opt_range[best.key->key].ranges == 1)
+ {
+ /*
+          Force the use of range as it is using the 'best key' and more
+          key parts (and thus will read fewer rows)
+ */
+ force_plan= 1;
+ }
type= JT_RANGE;
}
else
{
type= JT_INDEX_MERGE;
- best_filter= 0;
}
loose_scan_opt.check_range_access(join, idx, s->quick);
}
else
{
+ double records_table_filter;
+
+ /* We will now calculate cost of scan, with or without join buffer */
+ records_best_filter= records_after_filter=
+ apply_selectivity_for_table(s, use_cond_selectivity);
+ records_table_filter= ((found_constraint) ?
+ use_found_constraint(records_after_filter) :
+ records_after_filter);
+
+ DBUG_ASSERT(records_after_filter <= s->records);
+ DBUG_ASSERT(records_after_filter <= s->found_records);
+
+ set_if_smaller(best.records_out, records_table_filter);
+
+ org_records= rows2double(s->records);
+
/* Estimate cost of reading table. */
- if (s->table->force_index && !best_key) // index scan
+ if (s->cached_forced_index_type)
{
- type= JT_NEXT;
- tmp= s->table->file->read_time(s->ref.key, 1, s->records);
+ type= s->cached_forced_index_type;
+ cur_cost= s->cached_forced_index_cost;
+ forced_index= s->cached_forced_index;
}
- else // table scan
+ else
{
- tmp= s->scan_time();
- type= JT_ALL;
+ if (table->force_index_join && !best.key)
+ {
+ /*
+ The query is using 'forced_index' and we did not find a usable key.
+ Calculate cost of a table scan with the forced index.
+ */
+ type= JT_NEXT;
+ if (s->cached_covering_key != MAX_KEY)
+ {
+ /* Use value from estimate_scan_time */
+ forced_index= s->cached_covering_key;
+ cur_cost= s->cached_scan_and_compare_time;
+ }
+ else
+ {
+#ifdef FORCE_INDEX_SHOULD_FORCE_INDEX_SCAN
+ /* No cached key, use shortest allowed key */
+ key_map keys= *file->keys_to_use_for_scanning();
+ keys.intersect(table->keys_in_use_for_query);
+ if ((forced_index= find_shortest_key(table, &keys)) < MAX_KEY)
+ {
+ ALL_READ_COST cost= cost_for_index_read(thd, table,
+ forced_index,
+ s->records, 0);
+ cur_cost= file->cost(cost);
+ /* Calculate cost of checking the attached WHERE */
+ cur_cost= COST_ADD(cur_cost,
+ s->records * WHERE_COST_THD(thd));
+ }
+ else
+#endif
+ {
+ /* No usable key, use table scan */
+ cur_cost= s->cached_scan_and_compare_time;
+ type= JT_ALL;
+ }
+ }
+ }
+ else // table scan
+ {
+ cur_cost= s->cached_scan_and_compare_time;
+ type= JT_ALL;
+ }
+ /* Cache result for other calls */
+ s->cached_forced_index_type= type;
+ s->cached_forced_index_cost= cur_cost;
+ s->cached_forced_index= forced_index;
}
- if ((s->table->map & join->outer_join) || disable_jbuf) // Can't use join cache
+ if (disable_jbuf || (table->map & join->outer_join))
{
/*
- For each record we have to:
- - read the whole table record
- - skip rows which does not satisfy join condition
+ Simple scan
+ We estimate we have to read org_records rows.
+ records_after_filter rows will survive the where check of constants.
+ 'best.records_out' rows will survive after the check against columns
+ from previous tables.
*/
- double cmp_time= (s->records - rnd_records)/TIME_FOR_COMPARE;
- tmp= COST_MULT(record_count, COST_ADD(tmp,cmp_time));
+ scan_type= "scan";
+
+ /*
+ We have to compare each row set against all previous row combinations
+ */
+ cur_cost= COST_MULT(cur_cost, record_count);
}
else
{
- double refills= (1.0 + floor((double) cache_record_length(join,idx) *
- (record_count /
- (double) thd->variables.join_buff_size)));
- tmp= COST_MULT(tmp, refills);
- /*
- We don't make full cartesian product between rows in the scanned
- table and existing records because we skip all rows from the
- scanned table, which does not satisfy join condition when
- we read the table (see flush_cached_records for details). Here we
- take into account cost to read and skip these records.
+        /* Scan through the join cache */
+ double cmp_time, row_copy_cost, tmp_refills;
+
+ /*
+ Note that the cost of checking all rows against the table specific
+ WHERE is already included in cur_cost.
+ */
+ scan_type= "scan_with_join_cache";
+
+ /* Calculate cost of refills */
+ tmp_refills= (1.0 + floor((double) cache_record_length(join,idx) *
+ (record_count /
+ (double) thd->variables.join_buff_size)));
+ cur_cost= COST_MULT(cur_cost, tmp_refills);
+ refills= (ulonglong) tmp_refills;
+
+ /* We come here only if there are already rows in the join cache */
+ DBUG_ASSERT(idx != join->const_tables);
+ /*
+ records_after_filter is the number of rows that have survived
+ the table specific WHERE check that only involves constants.
+
+ Calculate cost of:
+ - Copying all previous record combinations to the join cache
+ - Copying the tables from the join cache to table records
+ - Checking the WHERE against the final row combination
*/
- double cmp_time= (s->records - rnd_records)/TIME_FOR_COMPARE;
- tmp= COST_ADD(tmp, cmp_time);
+ row_copy_cost= (ROW_COPY_COST_THD(thd) *
+ JOIN_CACHE_ROW_COPY_COST_FACTOR(thd));
+ cmp_time= (record_count * row_copy_cost +
+ records_after_filter * record_count *
+ ((idx - join->const_tables) * row_copy_cost +
+ WHERE_COST_THD(thd)));
+ cur_cost= COST_ADD(cur_cost, cmp_time);
+ use_join_buffer= 1;
}
}
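
The main effect of the join cache in the scan branch above is that the table is re-read once per buffer refill instead of once per previous row combination. A small sketch with made-up sizes, using the same refill formula as the code:

#include <cmath>
#include <cstdio>

static void sketch_scan_repeats(double record_count, double record_length,
                                double join_buff_size)
{
  /* Same shape as: 1 + floor(cache_record_length * record_count / join_buff_size) */
  double refills= 1.0 + floor(record_length * record_count / join_buff_size);
  printf("plain scan repeats: %.0f, with join cache: %.0f\n",
         record_count, refills);
}

int main()
{
  sketch_scan_repeats(100000, 64, 262144);   /* 100000 scans vs. 25 refills */
  return 0;
}
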
- trace_access_scan.add("access_type", type == JT_ALL ?
- "scan" :
- join_type_str[type]);
/* Splitting technique cannot be used with join cache */
- if (s->table->is_splittable())
- tmp+= s->table->get_materialization_cost();
- else
- tmp+= s->startup_cost;
-
- /*
- We estimate the cost of evaluating WHERE clause for found records
- as record_count * rnd_records / TIME_FOR_COMPARE. This cost plus
- tmp give us total cost of using TABLE SCAN
- */
+ if (table->is_splittable())
+ startup_cost= table->get_materialization_cost();
+ cur_cost+= startup_cost;
- const double best_filter_cmp_gain= best_filter
- ? best_filter->get_cmp_gain(record_count * records)
- : 0;
- trace_access_scan.add("resulting_rows", rnd_records);
- trace_access_scan.add("cost", tmp);
+ if (unlikely(trace_access_scan.trace_started()))
+ {
+ trace_access_scan.
+ add("access_type",
+ type == JT_ALL ? scan_type : join_type_str[type]);
+ if (type == JT_RANGE)
+ trace_access_scan.
+ add("range_index", table->key_info[s->quick->index].name);
+ trace_access_scan.
+ add("rows", org_records).
+ add("rows_after_filter", records_after_filter).
+ add("rows_out", best.records_out).
+ add("cost", cur_cost);
+ if (type == JT_ALL)
+ {
+ trace_access_scan.add("index_only",
+ (s->cached_covering_key != MAX_KEY));
+ }
+ }
- if (best == DBL_MAX ||
- COST_ADD(tmp, record_count/TIME_FOR_COMPARE*rnd_records) <
- (best_key->is_for_hash_join() ? best_time :
- COST_ADD(best - best_filter_cmp_gain,
- record_count/TIME_FOR_COMPARE*records)))
+ if (cur_cost + COST_EPS < best.cost || force_plan)
{
/*
If the table has a range (s->quick is set) make_join_select()
will ensure that this will be used
*/
- best= tmp;
- records= rnd_records;
- best_key= 0;
- best_filter= 0;
- if (s->quick && s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
- best_filter= filter;
+ best.cost= cur_cost;
+ best.records_read= org_records; // Records accessed
+ best.records= records_after_filter; // Records to be checked against
+ // previous row combinations
+
+ /*
+ If we are using 'use_cond_selectivity > 1' then
+ table_after_join_selectivity may take into account other
+        filters than what is currently used, so we have to use
+        records_after_filter. If 'use_cond_selectivity <= 1' then we
+ can use information from the best filter.
+ */
+ best.records_after_filter= ((use_cond_selectivity > 1) ?
+ records_after_filter :
+ records_best_filter);
+ best.key= 0;
+ best.forced_index= forced_index;
+ /*
+ filter is only set if
+ s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE
+ */
+ best.filter= filter;
/* range/index_merge/ALL/index access method are "independent", so: */
- best_ref_depends_map= 0;
- best_uses_jbuf= MY_TEST(!disable_jbuf && !((s->table->map &
- join->outer_join)));
- spl_plan= 0;
- best_type= type;
+ best.ref_depends_map= 0;
+ best.use_join_buffer= use_join_buffer;
+ best.refills= (ulonglong) ceil(refills);
+ best.spl_plan= 0;
+ best.type= type;
+ trace_access_scan.add("chosen", true);
}
- trace_access_scan.add("chosen", best_key == NULL);
+ else
+ trace_access_scan.add("chosen", false);
}
else
{
- trace_access_scan.add("type", "scan");
- trace_access_scan.add("chosen", false);
- trace_access_scan.add("cause", "cost");
+ if (unlikely(trace_access_scan.trace_started()))
+ trace_access_scan.
+ add("type", "scan").
+ add("chosen", false).
+ add("cause", "cost");
}
+ crash_if_first_double_is_bigger(best.records_out, best.records);
+ crash_if_first_double_is_bigger(best.records_out, best.records_read);
+
/* Update the cost information for the current partial plan */
- pos->records_read= records;
- pos->read_time= best;
- pos->key= best_key;
- pos->type= best_type;
+ pos->loops= record_count;
+ pos->records_init= best.records_read;
+ pos->records_after_filter= best.records_after_filter;
+ pos->records_read= best.records;
+ pos->records_out= best.records_out;
+ pos->prev_record_reads= best.prev_record_reads;
+ pos->identical_keys= best.identical_keys;
+ pos->read_time= best.cost;
+ pos->key= best.key;
+ pos->forced_index= best.forced_index;
+ pos->type= best.type;
pos->table= s;
- pos->ref_depend_map= best_ref_depends_map;
+ pos->ref_depend_map= best.ref_depends_map;
pos->loosescan_picker.loosescan_key= MAX_KEY;
- pos->use_join_buffer= best_uses_jbuf;
- pos->spl_plan= spl_plan;
- pos->range_rowid_filter_info= best_filter;
- pos->key_dependent= (best_type == JT_EQ_REF ? (table_map) 0 :
+ pos->use_join_buffer= best.use_join_buffer;
+ pos->firstmatch_with_join_buf= 0;
+ pos->spl_plan= best.spl_plan;
+ pos->range_rowid_filter_info= best.filter;
+ pos->key_dependent= (best.type == JT_EQ_REF ? (table_map) 0 :
key_dependent & remaining_tables);
+ pos->refills= best.refills;
- loose_scan_opt.save_to_position(s, loose_scan_pos);
+ loose_scan_opt.save_to_position(s, record_count, pos->records_out,
+ loose_scan_pos);
- if (!best_key &&
- idx == join->const_tables &&
- s->table == join->sort_by_table &&
- join->unit->lim.get_select_limit() >= records)
+ if (!best.key &&
+ idx == join->const_tables && // First table
+ table == join->sort_by_table &&
+ join->unit->lim.get_select_limit() >= best.records) // QQQ Why?
{
trace_access_scan.add("use_tmp_table", true);
join->sort_by_table= (TABLE*) 1; // Must use temporary table
@@ -8777,7 +9376,7 @@ best_access_path(JOIN *join,
trace_paths.end();
if (unlikely(thd->trace_started()))
- print_best_access_for_table(thd, pos, best_type);
+ print_best_access_for_table(thd, pos);
DBUG_VOID_RETURN;
}
@@ -8825,6 +9424,7 @@ static void choose_initial_table_order(JOIN *join)
JOIN_TAB **tab= join->best_ref + join->const_tables;
JOIN_TAB **tabs_end= tab + join->table_count - join->const_tables;
DBUG_ENTER("choose_initial_table_order");
+
/* Find where the top-level JOIN_TABs end and subquery JOIN_TABs start */
for (; tab != tabs_end; tab++)
{
@@ -8905,6 +9505,7 @@ static void choose_initial_table_order(JOIN *join)
@param join pointer to the structure providing all context info for
the query
@param join_tables set of the tables in the query
+ @param emb_sjm_nest List of tables in case of materialized semi-join nest
@retval
FALSE ok
@@ -8913,13 +9514,14 @@ static void choose_initial_table_order(JOIN *join)
*/
bool
-choose_plan(JOIN *join, table_map join_tables)
+choose_plan(JOIN *join, table_map join_tables, TABLE_LIST *emb_sjm_nest)
{
uint search_depth= join->thd->variables.optimizer_search_depth;
uint use_cond_selectivity=
join->thd->variables.optimizer_use_condition_selectivity;
bool straight_join= MY_TEST(join->select_options & SELECT_STRAIGHT_JOIN);
THD *thd= join->thd;
+ qsort2_cmp jtab_sort_func;
DBUG_ENTER("choose_plan");
join->cur_embedding_map= 0;
@@ -8927,26 +9529,33 @@ choose_plan(JOIN *join, table_map join_tables)
join->prune_level= join->thd->variables.optimizer_prune_level;
reset_nj_counters(join, join->join_list);
- qsort2_cmp jtab_sort_func;
- if (join->emb_sjm_nest)
+ if ((join->emb_sjm_nest= emb_sjm_nest))
{
/* We're optimizing semi-join materialization nest, so put the
tables from this semi-join as first
*/
jtab_sort_func= join_tab_cmp_embedded_first;
+ /*
+ If we are searching for the execution plan of a materialized semi-join
+ nest then allowed_tables contains bits only for the tables from this
+ nest.
+ */
+ join->allowed_tables= (emb_sjm_nest->sj_inner_tables &
+ ~join->const_table_map);
}
else
{
/*
if (SELECT_STRAIGHT_JOIN option is set)
reorder tables so dependent tables come after tables they depend
- on, otherwise keep tables in the order they were specified in the query
+ on, otherwise keep tables in the order they were specified in the query
else
- Apply heuristic: pre-sort all access plans with respect to the number of
- records accessed.
+ Apply heuristic: pre-sort all access plans with respect to the number
+ of records accessed.
*/
jtab_sort_func= straight_join ? join_tab_cmp_straight : join_tab_cmp;
+ join->allowed_tables= ~join->const_table_map;
}
/*
@@ -8957,19 +9566,19 @@ choose_plan(JOIN *join, table_map join_tables)
*/
my_qsort2(join->best_ref + join->const_tables,
join->table_count - join->const_tables, sizeof(JOIN_TAB*),
- jtab_sort_func, (void*)join->emb_sjm_nest);
+ jtab_sort_func, (void*) emb_sjm_nest);
Json_writer_object wrapper(thd);
Json_writer_array trace_plan(thd,"considered_execution_plans");
- if (!join->emb_sjm_nest)
- {
+ if (!emb_sjm_nest)
choose_initial_table_order(join);
- }
+
/*
Note: constant tables are already in the join prefix. We don't
put them into the cur_sj_inner_tables, though.
*/
+
join->cur_sj_inner_tables= 0;
if (straight_join)
@@ -8993,14 +9602,7 @@ choose_plan(JOIN *join, table_map join_tables)
DBUG_RETURN(TRUE);
}
- /*
- Store the cost of this query into a user variable
- Don't update last_query_cost for statements that are not "flat joins" :
- i.e. they have subqueries, unions or call stored procedures.
- TODO: calculate a correct cost for a query with subqueries and UNIONs.
- */
- if (join->thd->lex->is_single_level_stmt())
- join->thd->status_var.last_query_cost= join->best_read;
+ join->emb_sjm_nest= 0;
DBUG_RETURN(FALSE);
}
@@ -9266,42 +9868,96 @@ optimize_straight_join(JOIN *join, table_map remaining_tables)
{
POSITION *position= join->positions + idx;
Json_writer_object trace_one_table(thd);
+ double original_record_count, current_record_count;
+
if (unlikely(thd->trace_started()))
- trace_plan_prefix(join, idx, remaining_tables);
+ trace_plan_prefix(&trace_one_table, join, idx, remaining_tables);
/* Find the best access method from 's' to the current partial plan */
best_access_path(join, s, remaining_tables, join->positions, idx,
disable_jbuf, record_count,
position, &loose_scan_pos);
/* Compute the cost of the new plan extended with 's' */
- record_count= COST_MULT(record_count, position->records_read);
- const double filter_cmp_gain= position->range_rowid_filter_info
- ? position->range_rowid_filter_info->get_cmp_gain(record_count)
- : 0;
- read_time= COST_ADD(read_time,
- COST_ADD(position->read_time -
- filter_cmp_gain,
- record_count /
- TIME_FOR_COMPARE));
- optimize_semi_joins(join, remaining_tables, idx, &record_count, &read_time,
- &loose_scan_pos);
+ current_record_count= COST_MULT(record_count, position->records_out);
+ read_time= COST_ADD(read_time, position->read_time);
+ original_record_count= current_record_count;
+ optimize_semi_joins(join, remaining_tables, idx, &current_record_count,
+ &read_time, &loose_scan_pos);
+ if (position->sj_strategy != SJ_OPT_NONE && original_record_count)
+ {
+ /* Adjust records_out to contain the final number of rows */
+ double ratio= current_record_count / original_record_count;
+ if (ratio < 1)
+ {
+ position->records_out*= ratio;
+ }
+ if (unlikely(trace_one_table.trace_started()))
+ {
+ trace_one_table.
+ add("sj_rows_out", position->records_out).
+ add("sj_rows_for_plan", current_record_count).
+ add("sj_filtered", safe_filtered(position->records_out,
+ position->records_init));
+ }
+ }
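
A minimal sketch of the records_out adjustment above, assuming the same ratio-based scaling; the function name and numbers are illustrative only.

#include <cstdio>

/* If the semi-join strategy shrank the prefix row count, apply the same
   shrinking factor to this table's records_out. */
static double sketch_sj_adjust(double records_out,
                               double original_count, double current_count)
{
  if (original_count > 0 && current_count < original_count)
    records_out*= current_count / original_count;
  return records_out;
}

int main()
{
  /* Prefix shrank from 4000 to 1000 combinations => records_out scaled by 0.25 */
  printf("adjusted records_out: %.1f\n", sketch_sj_adjust(200, 4000, 1000));
  return 0;
}
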
remaining_tables&= ~(s->table->map);
- double pushdown_cond_selectivity= 1.0;
- if (use_cond_selectivity > 1)
- pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
- remaining_tables);
- position->cond_selectivity= pushdown_cond_selectivity;
+ if (use_cond_selectivity > 1 && position->sj_strategy == SJ_OPT_NONE)
+ {
+ double pushdown_cond_selectivity, records_out;
+ pushdown_cond_selectivity= table_after_join_selectivity(join, idx, s,
+ remaining_tables,
+ &records_out);
+ if (unlikely(thd->trace_started()) &&
+ pushdown_cond_selectivity != 1.0)
+ {
+ trace_one_table.
+ add("rows_out", records_out).
+ add("pushdown_cond_selectivity", pushdown_cond_selectivity).
+ add("filtered", safe_filtered(position->records_out,
+ position->records_init));
+ }
+ position->cond_selectivity= pushdown_cond_selectivity;
+ position->records_out= records_out;
+ current_record_count= COST_MULT(record_count, records_out);
+ }
+ else
+ position->cond_selectivity= 1.0;
++idx;
+ record_count= current_record_count;
}
if (join->sort_by_table &&
join->sort_by_table != join->positions[join->const_tables].table->table)
- read_time+= record_count; // We have to make a temp table
+ {
+ /*
+      We may have to make a temp table. Note that this is only a
+      heuristic since we cannot know for sure at this point if we
+      are going to use addon fields or have to flush the sort to
+      disk. We also don't know whether the temporary table will be in
+      memory or on disk.
+      The following calculation takes a middle ground where we assume
+      we can sort the keys in memory but have to use a disk based
+      temporary table to retrieve the rows.
+      This cost is probably much bigger than it has to be...
+ */
+ double sort_cost;
+ sort_cost= (get_qsort_sort_cost((ha_rows)record_count, 0) +
+ record_count *
+ DISK_TEMPTABLE_LOOKUP_COST(thd));
+ {
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add("estimated_cost_for_sorting", sort_cost);
+ }
+ }
+ read_time= COST_ADD(read_time, sort_cost);
+ }
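
The sorting heuristic above roughly amounts to the sketch below; get_qsort_sort_cost() is approximated here by n*log2(n) comparisons and both constants are assumed, so treat the numbers as shape only.

#include <cmath>
#include <cstdio>

static const double QSORT_CMP_COST= 0.00005;            /* assumed */
static const double DISK_TEMPTABLE_LOOKUP_COST= 0.70;   /* assumed */

static double sketch_sort_cost(double record_count)
{
  double n= record_count < 2 ? 2 : record_count;
  /* Sort the keys in memory, then fetch each row from a disk temp table */
  return n * log2(n) * QSORT_CMP_COST +
         record_count * DISK_TEMPTABLE_LOOKUP_COST;
}

int main()
{
  printf("sort cost for 1M rows: %.1f\n", sketch_sort_cost(1e6));
  return 0;
}
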
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION)*idx);
join->join_record_count= record_count;
- join->best_read= read_time - COST_EPS;
+ join->best_read= read_time;
}
@@ -9403,6 +10059,7 @@ greedy_search(JOIN *join,
// ==join->tables or # tables in the sj-mat nest we're optimizing
uint n_tables __attribute__((unused));
DBUG_ENTER("greedy_search");
+ DBUG_ASSERT(!(remaining_tables & join->const_table_map));
/* number of tables that remain to be optimized */
usable_tables= (join->emb_sjm_nest ?
@@ -9491,9 +10148,7 @@ greedy_search(JOIN *join,
/* compute the cost of the new plan extended with 'best_table' */
record_count= COST_MULT(record_count, join->positions[idx].records_read);
- read_time= COST_ADD(read_time,
- COST_ADD(join->positions[idx].read_time,
- record_count / TIME_FOR_COMPARE));
+ read_time= COST_ADD(read_time, join->positions[idx].read_time);
remaining_tables&= ~(best_table->table->map);
--size_remain;
@@ -9601,9 +10256,7 @@ void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
if (tab->records_read && (cur_table_map & filter_map))
{
record_count= COST_MULT(record_count, tab->records_read);
- read_time= COST_ADD(read_time,
- COST_ADD(tab->read_time,
- record_count / TIME_FOR_COMPARE));
+ read_time= COST_ADD(read_time, tab->read_time);
if (tab->emb_sj_nest)
sj_inner_fanout= COST_MULT(sj_inner_fanout, tab->records_read);
}
@@ -9618,7 +10271,7 @@ void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
if (tab == end_tab)
break;
}
- *read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
+ *read_time_arg= read_time;
*record_count_arg= record_count;
}
@@ -9647,9 +10300,8 @@ void JOIN::get_prefix_cost_and_fanout(uint n_tables,
record_count= COST_MULT(record_count, best_positions[i].records_read);
read_time= COST_ADD(read_time, best_positions[i].read_time);
}
- /* TODO: Take into account condition selectivities here */
}
- *read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
+ *read_time_arg= read_time;
*record_count_arg= record_count;
}
@@ -9680,8 +10332,7 @@ double JOIN::get_examined_rows()
COST_MULT((double) (tab->get_examined_rows()), prev_fanout));
prev_tab= tab;
}
- examined_rows= (double)
- (records > (double) HA_ROWS_MAX ? HA_ROWS_MAX : (ha_rows) records);
+ examined_rows= records;
return examined_rows;
}
@@ -9812,28 +10463,31 @@ double table_multi_eq_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
@brief
Get the selectivity of conditions when joining a table
- @param join The optimized join
- @param s The table to be joined for evaluation
- @param rem_tables The bitmap of tables to be joined later
+ @param join The optimized join
+ @param s The table to be joined for evaluation
+ @param rem_tables The bitmap of tables to be joined later
+ @param new_records_out OUT Set to number of rows accepted
@detail
Get selectivity of conditions that can be applied when joining this table
with previous tables.
For quick selects and full table scans, selectivity of COND(this_table)
- is accounted for in matching_candidates_in_table(). Here, we only count
+ is accounted for in apply_selectivity_for_table(). Here, we only count
selectivity of COND(this_table, previous_tables).
For other access methods, we need to calculate selectivity of the whole
condition, "COND(this_table) AND COND(this_table, previous_tables)".
@retval
- selectivity of the conditions imposed on the rows of s
+ selectivity of the conditions imposed on the rows of s, relative to
+ the rows that we are expected to read (position->records_init).
*/
static
-double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
- table_map rem_tables)
+double table_after_join_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
+ table_map rem_tables,
+ double *new_records_out)
{
uint16 ref_keyuse_steps_buf[MAX_REF_PARTS];
uint ref_keyuse_size= MAX_REF_PARTS;
@@ -9841,13 +10495,14 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
Field *field;
TABLE *table= s->table;
MY_BITMAP *read_set= table->read_set;
- double sel= s->table->cond_selectivity;
POSITION *pos= &join->positions[idx];
+ double sel, records_out= pos->records_out;
uint keyparts= 0;
uint found_part_ref_or_null= 0;
if (pos->key != 0)
{
+ sel= table->cond_selectivity;
/*
A ref access or hash join is used for this table. ref access is created
from
@@ -9896,13 +10551,15 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
{
key_part_map quick_key_map= (key_part_map(1) <<
table->opt_range[key].key_parts) - 1;
- if (table->opt_range[key].rows &&
- !(quick_key_map & ~table->const_key_parts[key]))
+ if (s->type == JT_RANGE ||
+ (table->opt_range[key].rows && (table->const_key_parts[key] & 1)))
{
- /*
- Ok, there is an equality for each of the key parts used by the
- quick select. This means, quick select's estimate can be reused to
- discount the selectivity of a prefix of a ref access.
+ /*
+ We are either using a range or we are using a REF which uses the
+ same key as an active range and the first key part is a constant.
+
+ In both cases we have to discount the selectivity for the range
+ as otherwise we are using the selectivity twice.
*/
for (; quick_key_map & 1 ; quick_key_map>>= 1)
{
@@ -9923,7 +10580,11 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
However if sel becomes greater than 2 then with high probability
something went wrong.
*/
- sel /= (double)table->opt_range[key].rows / (double) table->stat_records();
+ DBUG_ASSERT(sel <= 1.0);
+ DBUG_ASSERT(table->opt_range[key].rows <=
+ (double) table->stat_records());
+ sel /= ((double) table->opt_range[key].rows /
+ (double) table->stat_records());
set_if_smaller(sel, 1.0);
used_range_selectivity= true;
}
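To make the discounting step above concrete, here is a small standalone sketch (not part of the patch; the helper name and numbers are invented for illustration) of dividing the table's condition selectivity by the selectivity that the range access already delivers:

  #include <algorithm>
  #include <cstdio>

  /* Remove the part of the condition selectivity that the range access
     already provides, so it is not counted twice. */
  static double discount_range_selectivity(double cond_selectivity,
                                           double opt_range_rows,
                                           double stat_records)
  {
    double range_selectivity= opt_range_rows / stat_records;
    double sel= cond_selectivity / range_selectivity;
    return std::min(sel, 1.0);          /* same effect as set_if_smaller() */
  }

  int main()
  {
    /* 10000 rows in the table, the range reads 100 of them (1%) and all
       conditions together keep 0.5% of the table: 0.5 remains. */
    printf("%g\n", discount_range_selectivity(0.005, 100.0, 10000.0));
    return 0;
  }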
@@ -10017,38 +10678,26 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
}
keyuse++;
}
- }
- else
- {
/*
- The table is accessed with full table scan, or quick select.
- Selectivity of COND(table) is already accounted for in
- matching_candidates_in_table().
- */
- sel= 1;
- }
+ If the field f from the table is equal to a field from one of the
+ earlier joined tables then the selectivity of the range conditions
+ over the field f must be discounted.
- /*
- If the field f from the table is equal to a field from one the
- earlier joined tables then the selectivity of the range conditions
- over the field f must be discounted.
-
- We need to discount selectivity only if we're using ref-based
- access method (and have sel!=1).
- If we use ALL/range/index_merge, then sel==1, and no need to discount.
- */
- if (pos->key != NULL)
- {
+ We need to discount selectivity only if we're using ref-based
+ access method (and have sel!=1).
+ If we use ALL/range/index_merge, then sel==1, and no need to discount.
+ */
for (Field **f_ptr=table->field ; (field= *f_ptr) ; f_ptr++)
{
if (!bitmap_is_set(read_set, field->field_index) ||
!field->next_equal_field)
- continue;
- for (Field *next_field= field->next_equal_field;
- next_field != field;
+ continue;
+ for (Field *next_field= field->next_equal_field;
+ next_field != field;
next_field= next_field->next_equal_field)
{
- if (!(next_field->table->map & rem_tables) && next_field->table != table)
+ if (!(next_field->table->map & rem_tables) &&
+ next_field->table != table)
{
if (field->cond_selectivity > 0)
{
@@ -10059,11 +10708,37 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
}
}
}
+ /*
+ We have now calculated a more exact 'records_out' taking more index
+ costs into account.
+ pos->records_out previously contained the smallest record count for
+ all range or ref access, which should not be smaller than what we
+ calculated above.
+ */
+ records_out= pos->records_init * sel;
+ set_if_smaller(records_out, pos->records_out);
}
- sel*= table_multi_eq_cond_selectivity(join, idx, s, rem_tables,
+ sel= table_multi_eq_cond_selectivity(join, idx, s, rem_tables,
keyparts, ref_keyuse_steps);
+ records_out*= sel;
+
+ /*
+ Update sel to be relative to pos->records_read as that is what some old
+ code expects. Newer code should just use 'position->records_out' instead.
+ */
+ if (pos->records_read == 0)
+ sel= 1.0;
+ else
+ {
+ sel= records_out / pos->records_read;
+ DBUG_ASSERT(sel >= 0.0 and sel <= 1.00001);
+ if (sel > 1.0)
+ sel= 1.0;
+ }
+
exit:
+ *new_records_out= records_out;
if (ref_keyuse_steps != ref_keyuse_steps_buf)
my_free(ref_keyuse_steps);
return sel;
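As a quick illustration of the conversion just above (again not part of the patch), expressing an absolute row estimate as a selectivity relative to the rows the access method reads, clamped to at most 1.0:

  #include <cstdio>

  static double relative_selectivity(double records_out, double records_read)
  {
    if (records_read == 0)
      return 1.0;                      /* nothing read, nothing to scale */
    double sel= records_out / records_read;
    return sel > 1.0 ? 1.0 : sel;      /* guard against rounding overshoot */
  }

  int main()
  {
    printf("%g\n", relative_selectivity(25.0, 100.0));   /* 0.25 */
    printf("%g\n", relative_selectivity(100.0, 0.0));    /* 1 */
    return 0;
  }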
@@ -10084,7 +10759,7 @@ check_if_edge_table(POSITION *pos,
if ((pos->type == JT_EQ_REF ||
(pos->type == JT_REF &&
- pos->records_read == 1 &&
+ pos->records_init == 1 &&
!pos->range_rowid_filter_info)) &&
pushdown_cond_selectivity >= 0.999)
return SEARCH_FOUND_EDGE;
@@ -10277,7 +10952,7 @@ get_costs_for_tables(JOIN *join, table_map remaining_tables, uint idx,
// pplan_cost already too great, stop search
continue;
- pplan= expand pplan by best_access_method;
+ pplan= expand plan by best_access_method;
remaining_tables= remaining_tables - table T;
if (remaining_tables is not an empty set
and
@@ -10348,8 +11023,8 @@ best_extension_by_limited_search(JOIN *join,
{
THD *thd= join->thd;
/*
- 'join' is a partial plan with lower cost than the best plan so far,
- so continue expanding it further with the tables in 'remaining_tables'.
+ 'join' is a partial plan with lower cost than the best plan so far,
+ so continue expanding it further with the tables in 'remaining_tables'.
*/
JOIN_TAB *s;
double best_record_count= DBL_MAX;
@@ -10362,19 +11037,18 @@ best_extension_by_limited_search(JOIN *join,
SORT_POSITION *sort= (SORT_POSITION*) alloca(sizeof(SORT_POSITION)*tables_left);
SORT_POSITION *sort_end;
DBUG_ENTER("best_extension_by_limited_search");
-
DBUG_EXECUTE_IF("show_explain_probe_best_ext_lim_search",
- if (dbug_user_var_equals_int(thd,
+ if (dbug_user_var_equals_int(thd,
"show_explain_probe_select_id",
join->select_lex->select_number))
- dbug_serve_apcs(thd, 1);
- );
+ dbug_serve_apcs(thd, 1);
+ );
if (unlikely(thd->check_killed())) // Abort
DBUG_RETURN(SEARCH_ABORT);
DBUG_EXECUTE("opt", print_plan(join, idx, record_count, read_time, read_time,
- "part_plan"););
+ "part_plan"););
status_var_increment(thd->status_var.optimizer_join_prefixes_check_calls);
if (join->emb_sjm_nest)
@@ -10391,7 +11065,6 @@ best_extension_by_limited_search(JOIN *join,
/*
allowed_tables is used to check if there are tables left that can improve
a key search and to see if there are more tables to add in next iteration.
-
allowed_current_tables tells us which tables we can add to the current
plan at this stage.
*/
@@ -10405,7 +11078,7 @@ best_extension_by_limited_search(JOIN *join,
Json_writer_object trace_one_table(thd);
JOIN_TAB **best_ref= join->best_ref + idx;
if (unlikely(thd->trace_started()))
- trace_plan_prefix(join, idx, remaining_tables);
+ trace_plan_prefix(&trace_one_table, join, idx, remaining_tables);
Json_writer_array arr(thd, "get_costs_for_tables");
@@ -10464,14 +11137,15 @@ best_extension_by_limited_search(JOIN *join,
!check_interleaving_with_nj(s))
{
table_map real_table_bit= s->table->map;
- double current_record_count, current_read_time;
+ double current_record_count, current_read_time, original_record_count;
double partial_join_cardinality;
POSITION *position= join->positions + idx, *loose_scan_pos;
+ double pushdown_cond_selectivity;
Json_writer_object trace_one_table(thd);
if (unlikely(thd->trace_started()))
{
- trace_plan_prefix(join, idx, remaining_tables);
+ trace_plan_prefix(&trace_one_table, join, idx, remaining_tables);
trace_one_table.add_table_name(s);
}
@@ -10480,26 +11154,35 @@ best_extension_by_limited_search(JOIN *join,
loose_scan_pos= pos->position+1;
/* Compute the cost of the new plan extended with 's' */
- current_record_count= COST_MULT(record_count, position->records_read);
- const double filter_cmp_gain= position->range_rowid_filter_info
- ? position->range_rowid_filter_info->get_cmp_gain(current_record_count)
- : 0;
- current_read_time= COST_ADD(read_time,
- COST_ADD(position->read_time -
- filter_cmp_gain,
- current_record_count /
- TIME_FOR_COMPARE));
+ current_record_count= COST_MULT(record_count, position->records_out);
+ current_read_time= COST_ADD(read_time, position->read_time);
- if (unlikely(thd->trace_started()))
+ if (unlikely(trace_one_table.trace_started()))
{
- trace_one_table.add("rows_for_plan", current_record_count);
- trace_one_table.add("cost_for_plan", current_read_time);
+ trace_one_table.
+ add("rows_for_plan", current_record_count).
+ add("cost_for_plan", current_read_time);
}
+ original_record_count= current_record_count;
optimize_semi_joins(join, remaining_tables, idx, &current_record_count,
&current_read_time, loose_scan_pos);
-
+ if (position->sj_strategy != SJ_OPT_NONE)
+ {
+ /* Adjust records_out and current_record_count after semi join */
+ double ratio= current_record_count / original_record_count;
+ if (ratio < 1.0)
+ position->records_out*= ratio;
+ if (unlikely(trace_one_table.trace_started()))
+ {
+ trace_one_table.
+ add("sj_rows_out", position->records_out).
+ add("sj_rows_for_plan", current_record_count).
+ add("sj_filtered", safe_filtered(position->records_out,
+ position->records_init));
+ }
+ }
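A minimal sketch of the adjustment above (not part of the patch; the names are invented for illustration): when a semi-join strategy shrinks the prefix fanout, the table's own records_out is scaled by the same ratio:

  #include <cstdio>

  static double adjust_records_out_after_sj(double records_out,
                                            double rows_before_sj,
                                            double rows_after_sj)
  {
    double ratio= rows_after_sj / rows_before_sj;
    if (ratio < 1.0)                   /* only scale down, never up */
      records_out*= ratio;
    return records_out;
  }

  int main()
  {
    /* The semi-join cut the prefix from 1000 to 100 rows, so the
       table's 50 output rows become 5. */
    printf("%g\n", adjust_records_out_after_sj(50.0, 1000.0, 100.0));
    return 0;
  }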
/* Expand only partial plans with lower cost than the best QEP so far */
- if (current_read_time >= join->best_read)
+ if (current_read_time + COST_EPS >= join->best_read)
{
DBUG_EXECUTE("opt", print_plan(join, idx+1,
current_record_count,
@@ -10509,7 +11192,7 @@ best_extension_by_limited_search(JOIN *join,
trace_one_table
.add("pruned_by_cost", true)
.add("current_cost", current_read_time)
- .add("best_cost", join->best_read + COST_EPS);
+ .add("best_cost", join->best_read);
restore_prev_nj_state(s);
restore_prev_sj_state(remaining_tables, s, idx);
@@ -10548,19 +11231,19 @@ best_extension_by_limited_search(JOIN *join,
if (best_record_count > current_record_count ||
best_read_time > current_read_time ||
(idx == join->const_tables && // 's' is the first table in the QEP
- s->table == join->sort_by_table))
+ s->table == join->sort_by_table))
{
/*
Store the current record count and cost as the best
possible cost at this level if the following holds:
- It's the lowest record number and cost so far
- - There is no remaing table that could improve index usage
- or we found an EQ_REF or REF key with less than 2
- matching records (good enough).
+ - There is no remaining table that could improve index usage
+ or we found an EQ_REF or REF key with less than 2
+ matching records (good enough).
*/
if (best_record_count >= current_record_count &&
best_read_time >= current_read_time &&
- (!(position->key_dependent & allowed_tables) ||
+ (!(position->key_dependent & join->allowed_tables) ||
position->records_read < 2.0))
{
best_record_count= current_record_count;
@@ -10607,28 +11290,41 @@ best_extension_by_limited_search(JOIN *join,
}
}
- double pushdown_cond_selectivity= 1.0;
- if (use_cond_selectivity > 1)
- pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
- remaining_tables &
- ~real_table_bit);
+ pushdown_cond_selectivity= 1.0;
+ /*
+ TODO: When a semi-join strategy is applied (sj_strategy!=SJ_OPT_NONE),
+ we should account for selectivity from table_after_join_selectivity().
+ (Condition filtering is performed before the semi-join removes some
+ fanout so this might require moving the code around)
+ */
+ if (use_cond_selectivity > 1 && position->sj_strategy == SJ_OPT_NONE)
+ {
+ pushdown_cond_selectivity=
+ table_after_join_selectivity(join, idx, s,
+ remaining_tables & ~real_table_bit,
+ &position->records_out);
+
+ if (unlikely(trace_one_table.trace_started()) &&
+ pushdown_cond_selectivity != 1.0)
+ trace_one_table.
+ add("pushdown_cond_selectivity", pushdown_cond_selectivity).
+ add("filtered", safe_filtered(position->records_out,
+ position->records_init)).
+ add("rows_out", position->records_out);
+ }
join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
- partial_join_cardinality= (current_record_count *
- pushdown_cond_selectivity);
+ partial_join_cardinality= record_count * position->records_out;
- if (unlikely(thd->trace_started()))
- {
- if (pushdown_cond_selectivity < 1.0)
- {
- trace_one_table.add("selectivity", pushdown_cond_selectivity);
- trace_one_table.add("estimated_join_cardinality",
- partial_join_cardinality);
- }
- }
+ if (unlikely(thd->trace_started()) && pushdown_cond_selectivity < 1.0 &&
+ partial_join_cardinality < current_record_count)
+ trace_one_table
+ .add("selectivity", pushdown_cond_selectivity)
+ .add("estimated_join_cardinality", partial_join_cardinality);
+
+ if (search_depth > 1 &&
+ ((remaining_tables & ~real_table_bit) & join->allowed_tables))
- if ((search_depth > 1) &&
- ((remaining_tables & ~real_table_bit) & allowed_tables))
{
/* Recursively expand the current partial plan */
Json_writer_array trace_rest(thd, "rest_of_plan");
@@ -10666,18 +11362,28 @@ best_extension_by_limited_search(JOIN *join,
{
/*
We may have to make a temp table, note that this is only a
- heuristic since we cannot know for sure at this point.
- Hence it may be wrong.
+ heuristic since we cannot know for sure at this point if we
+ are going to use addon fields or have to flush the sort to
+ disk. We also don't know if the temporary table will be in memory
+ or on disk.
+ The following calculation takes a middle ground where we assume
+ we can sort the keys in memory but have to use a disk based
+ temporary table to retrieve the rows.
+ This cost is probably much bigger than it has to be...
*/
- trace_one_table.add("cost_for_sorting", current_record_count);
- current_read_time= COST_ADD(current_read_time, current_record_count);
+ double sort_cost;
+ sort_cost= (get_qsort_sort_cost((ha_rows)current_record_count,0) +
+ current_record_count *
+ DISK_TEMPTABLE_LOOKUP_COST(thd));
+ trace_one_table.add("cost_for_sorting", sort_cost);
+ current_read_time= COST_ADD(current_read_time, sort_cost);
}
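To illustrate the middle-ground estimate above, here is a standalone sketch (not part of the patch). It assumes an n*log2(n) comparison model for the qsort part; the two cost constants are placeholders, not MariaDB's actual get_qsort_sort_cost() or DISK_TEMPTABLE_LOOKUP_COST() values:

  #include <algorithm>
  #include <cmath>
  #include <cstdio>

  static double middle_ground_sort_cost(double rows)
  {
    const double compare_cost= 0.01;               /* per comparison (assumed) */
    const double disk_temptable_lookup_cost= 0.5;  /* per row fetch (assumed) */
    /* Sort the keys in memory, then fetch every row from a disk based
       temporary table. */
    double key_sort_cost= rows * std::log2(std::max(rows, 2.0)) * compare_cost;
    return key_sort_cost + rows * disk_temptable_lookup_cost;
  }

  int main()
  {
    printf("%g\n", middle_ground_sort_cost(10000.0));
    return 0;
  }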
if (current_read_time < join->best_read)
{
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION) * (idx + 1));
join->join_record_count= partial_join_cardinality;
- join->best_read= current_read_time - COST_EPS;
+ join->best_read= current_read_time;
}
DBUG_EXECUTE("opt", print_plan(join, idx+1,
current_record_count,
@@ -10927,92 +11633,241 @@ cache_record_length(JOIN *join,uint idx)
return length;
}
-
/*
- Get the number of different row combinations for subset of partial join
+ Estimate the number of engine ha_index_read() calls for EQ_REF tables
+ when taking into account the one-row-cache in join_read_always_key()
SYNOPSIS
- prev_record_reads()
- join The join structure
- idx Number of tables in the partial join order (i.e. the
- partial join order is in join->positions[0..idx-1])
- found_ref Bitmap of tables for which we need to find # of distinct
- row combinations.
+ @param position All previous tables best_access_path() information.
+ @param idx Number of (previous) tables in positions.
+ @param record_count Number of incoming record combinations
+ @param found_ref Bitmap of tables that is used to construct the key
+ used with the index read.
- DESCRIPTION
- Given a partial join order (in join->positions[0..idx-1]) and a subset of
- tables within that join order (specified in found_ref), find out how many
- distinct row combinations of subset tables will be in the result of the
- partial join order.
-
- This is used as follows: Suppose we have a table accessed with a ref-based
- method. The ref access depends on current rows of tables in found_ref.
- We want to count # of different ref accesses. We assume two ref accesses
- will be different if at least one of access parameters is different.
- Example: consider a query
-
- SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field
-
- and a join order:
- t1, ref access on t1.key=c1
- t2, ref access on t2.key=c2
- t3, ref access on t3.key=t1.field
-
- For t1: n_ref_scans = 1, n_distinct_ref_scans = 1
- For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1
- For t3: n_ref_scans = records_read(t1)*records_read(t2)
- n_distinct_ref_scans = #records_read(t1)
-
- The reason for having this function (at least the latest version of it)
- is that we need to account for buffering in join execution.
-
- An edge-case example: if we have a non-first table in join accessed via
- ref(const) or ref(param) where there is a small number of different
- values of param, then the access will likely hit the disk cache and will
- not require any disk seeks.
-
- The proper solution would be to assume an LRU disk cache of some size,
- calculate probability of cache hits, etc. For now we just count
- identical ref accesses as one.
+ @return # The number of estimated calls that cannot be cached by
+ the one-row-cache. In other words, the number of expected
+ calls to engine ha_index_read_map().
+ Between 1 and record_count or 0 if record_count == 0
- RETURN
- Expected number of row combinations
+ DESCRIPTION
+ The one-row-cache gives a great benefit when there are multiple consecutive
+ calls to ha_index_read() with the same key. In this case we can skip
+ calling the engine (and in the future also skip checking the key
+ condition), which can notably increase the performance.
+
+ Assuming most of the rows are cached, there is no notable saving to be
+ made trying to calculate the total number of distinct key values that will
+ be used. The performance of an ha_index_read() call is about the same even
+ if we repeatedly read the same set of rows.
+
+ This code works by calculating the number of identical key sequences
+ found in the record stream.
+ The number of expected distinct calls can then be calculated as
+ record_count / sequences.
+
+ Some things to note:
+ - record_count == PRODUCT(records_out) over all tables[0...idx-1]
+ - position->prev_record_reads contains the number of identical
+ sequences found for previous EQ_REF tables.
+
+ Assume a join prefix of t1,t2,t3,t4 and t4 is an EQ_REF table.
+ We have the following combinations that we have to consider:
+
+======
+1) No JOIN_CACHE usage, tables depend only on one previous table
+
+ Row combinations are generated as:
+ - for all rows in t1
+ - for all rows in t2
+ - for all rows in t3
+ or
+ t1.1,t2.1,t3.1, t1.1,t2.1,t3.2, t1.1,t2.1,t3.3... # Only t3 row changes
+ (until no more rows in t3, i.e. t3.records_out times)
+ t1.1,t2.2,t3.1, t1.1,t2.2,t3.2, t1.1,t2.2,t3.3... # t2.2 read
+ (above repeated until no more rows in t2 and t3)
+ t1.2,t2.1,t3.1, t1.2,t2.1,t3.2, t1.2,t2.1,t3.3... # t1.2 read
+
+ If t4 is an EQ_REF table that depends on one of the
+ previous tables, the number of identical keys can be calculated
+ as the multiplication of records_out of the tables in between
+ t4 and its first dependency.
+
+ Let's consider cases where t4 depends on different previous tables:
+ WHERE t4.a=t3.a
+ no caching as t3 can change for each row
+ engine_calls: record_count
+
+ WHERE t4.a=t2.a
+ t4 does not depend on t3. The repeated rows are:
+ t1.1,t2.1,t3.1 to t1.1,t2.1,t3.last # t3.records_out rows
+ t1.1,t2.2,t3.1 to t1.1,t2.2,t3.last # t3.records_out rows
+ ...
+ t1.2,t2.1,t3.1 to t1.2,t2.1,t3.last
+ ...
+ t1.last,t2.last,t3.1 to t1.last,t2.last,t3.last
+
+ For each combination of t1 and t2 there are t3.records_out repeated
+ rows with equal key value
+ engine_calls: record_count / t3.records_out calls =
+ t1.records_out * t2.records_out
+
+ WHERE t4.a=t1.a
+ The repeated sequences:
+ t1.1,t2.1,t3.1 to t1.1,t2.last,t3.last
+ t1.2,t2.1,t3.1 to t1.2,t2.last,t3.last
+ repeated rows: t2.records_out * t3.records_out
+ engine_calls: record_count/repeated_rows = t1.records_out
+
+ If t4 depends on a table that uses EQ_REF access, we can multiply that
+ table's repeated_rows with the current table's repeated_rows to take that
+ into account.
+
+=====
+2) Keys depending on multiple tables
+
+ In this case we have to stop searching after we find the first
+ table we depend upon.
+ We have to also disregard the number of repeated rows for the
+ found table. This can be seen from (assuming tables t1...t6):
+
+ WHERE t6.a=t4.a and t6.a=t3.a and t4.a= t2.a
+ - Here t4 does not depend on t3 (and thus there are
+ t3.records_out identical keys for t4). However the t6 key will
+ change for each t3 row, so t6 cannot use
+ t3.identical_keys
+
+ WHERE t4.key_part1=t1.a and t4.key_part2= t3.a
+ As t4.key_part2 will change for every row, the one-row-cache will not
+ be hit
+
+ WHERE t4.key_part1=t1.a and t4.key_part2= t2.a
+ t4.key will change when t1 or t2 changes
+ This is the same case as above for WHERE t4.a = t2.a
+ engine_calls: record_count / t3.records_out calls
+
+=====
+3) JOIN_CACHE is used
+
+ If any table is using a join cache, this changes the row
+ combinations seen by the following tables. Using a join cache for a
+ table T# will have the T# rows repeated for the next table as many
+ times as there are combinations in the cache. Then the cache is
+ refilled and the operation repeats 'refills-1' more times.
+
+ Table rows from the table just before T# will come in 'random order'
+ from the point of view of the next tables.
+
+ Assuming t3 is using a cache, t4 will see the rows coming in the
+ following order:
+ t1.1,t2.1,t3.1, t1.1,t2.2,t3.1, t1.1,t2.3,t3.1...
+ (t3.1 repeated 't2.records_out' times)
+ t1.2,t2.1,t3.1, t1.2,t2.2,t3.1, t1.2,t2.3,t3.1...
+ (Next row in t1 used)
+ t1.1,t2.1,t3.2, t1.1,t2.2,t3.2, t1.1,t2.3,t3.2...
+ (Restarting all t1 & t2 combinations for t3.2)
+
+ WHERE t4.a=t3.a
+ - There is a repeated sequence of t3.records_out rows for
+ each t1,t2 row combination.
+ engine_calls= record_count / t3.records_out
+
+ WHERE t4.a=t2.a
+ t2 changes for each row
+ engine_calls= record_count
+
+ WHERE t4.a=t1.a
+ repeated rows= t2.records_out
+ engine_calls= record_count / t2.records_out
+
+ A refill of the join cache will restart the row sequences
+ (we have 'refill' more sequences), so we will have to do 'refill' times
+ more engine read calls.
+
+=====
+ Expectations of the accuracy of the return value
+
+ - The value is always between 1 and record_count
+ - The returned value should almost always be larger than the true number of
+ engine calls.
+
+ - Assuming that every row has different values for all other columns for
+ each unique key value and record_count is accurate:
+ - If a table depends on multiple tables, the return value may be
+ notably larger than the real value.
+ - If there is no join cache the value should be exact.
+ - If there is a join cache, but no refills are calculated or done, then
+ the value should be exact.
+ - If there were more join_cache refills than calculated, the value
+ may be slightly too low.
+ - If the number of refills is equal to or less than what was calculated,
+ the value should be larger than the expected number of engine read
+ calls. The more refills, the less exact the number will be.
*/
-double
-prev_record_reads(const POSITION *positions, uint idx, table_map found_ref)
+static double
+prev_record_reads(const POSITION *position, uint idx, table_map found_ref,
+ double record_count, double *identical_keys)
{
- double found=1.0;
- const POSITION *pos_end= positions - 1;
- for (const POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
+ double found= 1.0;
+ const POSITION *pos_end= position - 1;
+ const POSITION *cur_pos= position + idx;
+
+ /* Safety against const tables */
+ if (unlikely(!found_ref))
+ goto end;
+
+ for (const POSITION *pos= cur_pos-1; pos != pos_end; pos--)
{
- if (pos->table->table->map & found_ref)
+ if (found_ref & pos->table->table->map)
{
- found_ref|= pos->ref_depend_map;
- /*
- For the case of "t1 LEFT JOIN t2 ON ..." where t2 is a const table
- with no matching row we will get position[t2].records_read==0.
- Actually the size of output is one null-complemented row, therefore
- we will use value of 1 whenever we get records_read==0.
-
- Note
- - the above case can't occur if inner part of outer join has more
- than one table: table with no matches will not be marked as const.
-
- - Ideally we should add 1 to records_read for every possible null-
- complemented row. We're not doing it because: 1. it will require
- non-trivial code and add overhead. 2. The value of records_read
- is an inprecise estimate and adding 1 (or, in the worst case,
- #max_nested_outer_joins=64-1) will not make it any more precise.
- */
- if (pos->records_read)
+ /* Found a table we depend on */
+ found_ref&= ~pos->table->table->map;
+ if (!found_ref)
{
- found= COST_MULT(found, pos->records_read);
- found*= pos->cond_selectivity;
+ /*
+ No more dependencies. We can use the cached values to improve things
+ a bit
+ */
+ if (pos->type == JT_EQ_REF)
+ found= COST_MULT(found, pos->identical_keys);
+ else if (pos->use_join_buffer)
+ found= COST_MULT(found, pos->loops / pos->refills);
}
- }
+ break;
+ }
+ if (unlikely(pos->use_join_buffer))
+ {
+ /* Each refill can change the cached key */
+ found/= pos->refills;
+ }
+ else
+ {
+ /*
+ We are not depending on the current table.
+ There are 'records_out' rows with identical
+ values for the tables we depend on.
+ */
+ found= COST_MULT(found, pos->records_out);
+ }
}
- return found;
+
+ /*
+ In most cases found should be <= record_count.
+
+ However if there was a reduction of rows (records_out < 1) before
+ the referencing table then found could be >= record_count.
+ To get reasonable numbers, we limit prev_record_reads to be between
+ 1.0 and record_count as we have to always do at least one read
+ anyway.
+ */
+
+end:
+ if (unlikely(found > record_count))
+ found= record_count;
+ if (unlikely(found <= 1.0))
+ found= 1.0;
+ *identical_keys= found;
+ return record_count / found;
}
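As a worked example of the estimate documented above (not part of the patch), for the simple case with no join buffering and a key that depends on a single earlier table: the identical-key run length is the product of records_out of the tables between the EQ_REF table and its dependency, and the expected engine calls are record_count divided by that run length:

  #include <algorithm>
  #include <cstdio>

  static double estimate_engine_calls(double record_count,
                                      const double *records_out_between,
                                      int n_between)
  {
    double identical_keys= 1.0;
    for (int i= 0; i < n_between; i++)
      identical_keys*= records_out_between[i];
    /* Clamp as the function above does: at least one read, at most
       record_count reads. */
    identical_keys= std::min(std::max(identical_keys, 1.0), record_count);
    return record_count / identical_keys;
  }

  int main()
  {
    /* t1,t2,t3 with records_out 10,20,30 and t4 keyed on t2.a: only t3
       sits between t4 and its dependency, so each t1,t2 combination
       repeats the same key 30 times -> 6000 / 30 = 200 engine calls. */
    double between[]= { 30.0 };
    printf("%g\n", estimate_engine_calls(10.0 * 20.0 * 30.0, between, 1));
    return 0;
  }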
@@ -11430,16 +12285,20 @@ bool JOIN::get_best_combination()
j->table= NULL; //temporary way to tell SJM tables from others.
j->ref.key = -1;
j->on_expr_ref= (Item**) &null_ptr;
- j->keys= key_map(1); /* The unique index is always in 'possible keys' in EXPLAIN */
+ /* The unique index is always in 'possible keys' in EXPLAIN */
+ j->keys= key_map(1);
/*
2. Proceed with processing SJM nest's join tabs, putting them into the
sub-order
*/
SJ_MATERIALIZATION_INFO *sjm= cur_pos->table->emb_sj_nest->sj_mat_info;
- j->records_read= (sjm->is_sj_scan? sjm->rows : 1);
+ j->records_read= (sjm->is_sj_scan? sjm->rows : 1.0);
+ j->records_init= j->records_out= j->records_read;
j->records= (ha_rows) j->records_read;
j->cond_selectivity= 1.0;
+ j->join_read_time= 0.0; /* Not saved currently */
+ j->join_loops= 0.0;
JOIN_TAB *jt;
JOIN_TAB_RANGE *jt_range;
if (!(jt= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*sjm->tables)) ||
@@ -11455,7 +12314,7 @@ bool JOIN::get_best_combination()
j= jt;
}
- *j= *best_positions[tablenr].table;
+ *j= *cur_pos->table;
j->bush_root_tab= sjm_nest_root;
@@ -11463,43 +12322,55 @@ bool JOIN::get_best_combination()
form->reginfo.join_tab=j;
DBUG_PRINT("info",("type: %d", j->type));
if (j->type == JT_CONST)
- goto loop_end; // Handled in make_join_stat..
+ goto loop_end; // Handled in make_join_stat..
- j->loosescan_match_tab= NULL; //non-nulls will be set later
+ j->loosescan_match_tab= NULL; //non-nulls will be set later
j->inside_loosescan_range= FALSE;
j->ref.key = -1;
j->ref.key_parts=0;
if (j->type == JT_SYSTEM)
goto loop_end;
- if ( !(keyuse= best_positions[tablenr].key))
+
+ if (!(keyuse= cur_pos->key))
{
- j->type=JT_ALL;
- if (best_positions[tablenr].use_join_buffer &&
+ if (cur_pos->type == JT_NEXT) // Forced index
+ {
+ j->type= JT_NEXT;
+ j->index= cur_pos->forced_index;
+ }
+ else
+ j->type= JT_ALL;
+ if (cur_pos->use_join_buffer &&
tablenr != const_tables)
full_join= 1;
}
-
- /*if (best_positions[tablenr].sj_strategy == SJ_OPT_LOOSE_SCAN)
- {
- DBUG_ASSERT(!keyuse || keyuse->key ==
- best_positions[tablenr].loosescan_picker.loosescan_key);
- j->index= best_positions[tablenr].loosescan_picker.loosescan_key;
- }*/
-
if ((j->type == JT_REF || j->type == JT_EQ_REF) &&
is_hash_join_key_no(j->ref.key))
hash_join= TRUE;
- j->range_rowid_filter_info= best_positions[tablenr].range_rowid_filter_info;
+ j->range_rowid_filter_info=
+ cur_pos->range_rowid_filter_info;
- loop_end:
- /*
+ /*
Save records_read in JOIN_TAB so that select_describe()/etc don't have
to access join->best_positions[].
*/
- j->records_read= best_positions[tablenr].records_read;
- j->cond_selectivity= best_positions[tablenr].cond_selectivity;
+ j->records_init= cur_pos->records_init;
+ j->records_read= cur_pos->records_read;
+ j->records_out= cur_pos->records_out;
+ j->join_read_time= cur_pos->read_time;
+ j->join_loops= cur_pos->loops;
+
+ loop_end:
+ j->cond_selectivity= cur_pos->cond_selectivity;
+ DBUG_ASSERT(j->cond_selectivity <= 1.0);
+ crash_if_first_double_is_bigger(j->records_out,
+ j->records_init *
+ (j->range_rowid_filter_info ?
+ j->range_rowid_filter_info->selectivity :
+ 1.0));
+
map2table[j->table->tablenr]= j;
/* If we've reached the end of sjm nest, switch back to main sequence */
@@ -11763,7 +12634,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
if (!keyparts && allow_full_scan)
{
/* It's a LooseIndexScan strategy scanning whole index */
- j->type= JT_ALL;
+ j->type= JT_ALL; // TODO: Check if this should be JT_NEXT
j->index= key;
DBUG_RETURN(FALSE);
}
@@ -11892,16 +12763,14 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
j->table->const_table= 1;
else if (!((keyparts == keyinfo->user_defined_key_parts &&
(
- (key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
- /* Unique key and all keyparts are NULL rejecting */
- ((key_flags & HA_NOSAME) && keyparts == not_null_keyparts)
- )) ||
- /* true only for extended keys */
- (keyparts > keyinfo->user_defined_key_parts &&
- MY_TEST(key_flags & HA_EXT_NOSAME) &&
- keyparts == keyinfo->ext_key_parts)
- ) ||
- null_ref_key)
+ (key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
+ /* Unique key and all keyparts are NULL rejecting */
+ ((key_flags & HA_NOSAME) && keyparts == not_null_keyparts)
+ )) ||
+ /* true only for extended keys */
+ (MY_TEST(key_flags & HA_EXT_NOSAME) &&
+ keyparts == keyinfo->ext_key_parts) ) ||
+ null_ref_key)
{
/* Must read with repeat */
j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF;
@@ -12258,7 +13127,10 @@ make_outerjoin_info(JOIN *join)
{
if (embedding->is_active_sjm())
{
- /* We're trying to walk out of an SJ-Materialization nest. Don't do this. */
+ /*
+ We're trying to walk out of an SJ-Materialization nest.
+ Don't do this.
+ */
break;
}
/* Ignore sj-nests: */
@@ -12367,6 +13239,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
DBUG_ENTER("make_join_select");
if (select)
{
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_conditions(thd, "attaching_conditions_to_tables");
+ Json_writer_array trace_attached_comp(thd,
+ "attached_conditions_computation");
add_not_null_conds(join);
table_map used_tables;
/*
@@ -12406,8 +13282,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
trace_const_cond.add("condition_on_constant_tables", const_cond);
if (const_cond->is_expensive())
{
- trace_const_cond.add("evaluated", "false")
- .add("cause", "expensive cond");
+ if (unlikely(trace_const_cond.trace_started()))
+ trace_const_cond.
+ add("evalualted", "false").
+ add("cause", "expensive cond");
}
else
{
@@ -12419,8 +13297,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (!const_cond_result)
{
DBUG_PRINT("info",("Found impossible WHERE condition"));
- trace_const_cond.add("evaluated", "true")
- .add("found", "impossible where");
+ if (unlikely(trace_const_cond.trace_started()))
+ trace_const_cond.
+ add("evalualted", "true").
+ add("found", "impossible where");
join->exec_const_cond= NULL;
DBUG_RETURN(1);
}
@@ -12439,6 +13319,9 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
{
add_cond_and_fix(thd, &outer_ref_cond, join->outer_ref_cond);
join->outer_ref_cond= outer_ref_cond;
+
+ Json_writer_object trace(thd);
+ trace.add("outer_ref_cond", outer_ref_cond);
}
}
else
@@ -12454,6 +13337,9 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
add_cond_and_fix(thd, &pseudo_bits_cond,
join->pseudo_bits_cond);
join->pseudo_bits_cond= pseudo_bits_cond;
+
+ Json_writer_object trace(thd);
+ trace.add("pseudo_bits_cond", pseudo_bits_cond);
}
}
}
@@ -12462,10 +13348,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/*
Step #2: Extract WHERE/ON parts
*/
- Json_writer_object trace_wrapper(thd);
- Json_writer_object trace_conditions(thd, "attaching_conditions_to_tables");
- Json_writer_array trace_attached_comp(thd,
- "attached_conditions_computation");
+
uint i;
for (i= join->top_join_tab_count - 1; i >= join->const_tables; i--)
{
@@ -12490,15 +13373,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X
*/
JOIN_TAB *first_inner_tab= tab->first_inner;
+ COND *tmp;
if (!tab->bush_children)
current_map= tab->table->map;
else
current_map= tab->bush_children->start->emb_sj_nest->sj_inner_tables;
- bool use_quick_range=0;
- COND *tmp;
-
/*
Tables that are within SJ-Materialization nests cannot have their
conditions referring to preceding non-const tables.
@@ -12516,23 +13397,28 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
used_tables|=current_map;
- if (tab->type == JT_REF && tab->quick &&
+ if ((tab->type == JT_REF || tab->type == JT_RANGE) && tab->quick &&
(((uint) tab->ref.key == tab->quick->index &&
tab->ref.key_length < tab->quick->max_used_key_length) ||
(!is_hash_join_key_no(tab->ref.key) &&
tab->table->intersect_keys.is_set(tab->ref.key))))
{
/* Range uses longer key; Use this instead of ref on key */
- Json_writer_object ref_to_range(thd);
- ref_to_range.add("ref_to_range", true);
- ref_to_range.add("cause", "range uses longer key");
- tab->type=JT_ALL;
- use_quick_range=1;
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object ref_to_range(thd);
+ ref_to_range.
+ add("ref_to_range", true).
+ add("cause", "range uses longer key");
+ }
+ tab->type= JT_RANGE;
tab->use_quick=1;
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
- join->best_positions[i].records_read= rows2double(tab->quick->records);
- /*
+ join->best_positions[i].records_read=
+ join->best_positions[i].records_out=
+ rows2double(tab->quick->records);
+ /*
We will use join cache here : prevent sorting of the first
table only and sort at the end.
*/
@@ -12590,7 +13476,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (cond && !tmp && tab->quick)
{ // Outer join
- if (tab->type != JT_ALL && !is_hj)
+ if ((tab->type != JT_ALL && tab->type != JT_RANGE) && !is_hj)
{
/*
Don't use the quick method
@@ -12616,7 +13502,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
tab->type == JT_EQ_REF || first_inner_tab)
{
DBUG_EXECUTE("where",print_where(tmp,
- tab->table? tab->table->alias.c_ptr() :"sjm-nest",
+ tab->table ?
+ tab->table->alias.c_ptr() :"sjm-nest",
QT_ORDINARY););
SQL_SELECT *sel= tab->select= ((SQL_SELECT*)
thd->memdup((uchar*) select,
@@ -12693,10 +13580,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
sel->quick_keys.clear_all();
sel->needed_reg.clear_all();
if (is_hj && tab->rowid_filter)
- {
- delete tab->rowid_filter;
- tab->rowid_filter= 0;
- }
+ tab->clear_range_rowid_filter();
}
else
{
@@ -12704,20 +13588,22 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
}
tab->quick=0;
}
- uint ref_key= sel->head? (uint) sel->head->reginfo.join_tab->ref.key+1 : 0;
+ uint ref_key= (sel->head ?
+ (uint) sel->head->reginfo.join_tab->ref.key+1 :
+ 0);
if (i == join->const_tables && ref_key)
{
if (!tab->const_keys.is_clear_all() &&
tab->table->reginfo.impossible_range)
DBUG_RETURN(1);
}
- else if (tab->type == JT_ALL && ! use_quick_range)
+ else if ((tab->type == JT_ALL || tab->type == JT_NEXT))
{
if (!tab->const_keys.is_clear_all() &&
tab->table->reginfo.impossible_range)
DBUG_RETURN(1); // Impossible range
/*
- We plan to scan all rows.
+ We plan to scan all rows, either with a table scan or an index scan.
Check again if we should use an index.
There are two cases:
@@ -12737,7 +13623,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (!tab->table->is_filled_at_execution() &&
!tab->loosescan_match_tab && // (1)
- ((cond && (!tab->keys.is_subset(tab->const_keys) && i > 0)) ||
+ ((cond && (!tab->keys.is_subset(tab->const_keys) &&
+ i > join->const_tables)) ||
(!tab->const_keys.is_clear_all() && i == join->const_tables &&
join->unit->lim.get_select_limit() <
join->best_positions[i].records_read &&
@@ -12788,7 +13675,23 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/* Fix for EXPLAIN */
if (sel->quick)
- join->best_positions[i].records_read= (double)sel->quick->records;
+ {
+ join->best_positions[i].records_read=
+ (double) sel->quick->records;
+ set_if_smaller(join->best_positions[i].records_out,
+ join->best_positions[i].records_read);
+ }
+ else
+ {
+ /*
+ sel->head->opt_range_condition_rows may have been updated to a
+ smaller number than before by a call to test_quick_select. This can
+ happen even if the range optimizer decided to not use the range
+ (sel->quick was not set).
+ */
+ set_if_smaller(join->best_positions[i].records_out,
+ rows2double(sel->head->opt_range_condition_rows));
+
+ }
}
else
{
@@ -12798,6 +13701,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (!sel->quick_keys.is_subset(tab->checked_keys) ||
!sel->needed_reg.is_subset(tab->checked_keys))
{
+ handler *file= tab->table->file;
/*
"Range checked for each record" is a "last resort" access method
that should only be used when the other option is a cross-product
@@ -12813,9 +13717,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
(sel->quick_keys.is_clear_all() ||
(sel->quick &&
sel->quick->read_time >
- tab->table->file->scan_time() +
- tab->table->file->stats.records/TIME_FOR_COMPARE
- ))) ?
+ file->cost(file->ha_scan_and_compare_time(file->stats.records))))) ?
2 : 1;
sel->read_tables= used_tables & ~current_map;
sel->quick_keys.clear_all();
@@ -13036,7 +13938,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
Item *const cond = tab->select_cond;
Json_writer_object trace_one_table(thd);
trace_one_table.add_table_name(tab);
- trace_one_table.add("attached", cond);
+ trace_one_table.add("attached_condition", cond);
}
}
}
@@ -13128,7 +14030,7 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
(uchar *) &first_keyuse,
FALSE))
return TRUE;
- table->reginfo.join_tab->keys.set_bit(table->s->keys);
+ table->reginfo.join_tab->keys.set_bit(table->s->keys - 1);
tab= table->reginfo.join_tab;
for (uint i=0; i < parts; i++)
tab->key_dependent|= save_first_keyuse[i].used_tables;
@@ -13155,36 +14057,45 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys)
static
bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array)
{
- KEYUSE *keyuse= dynamic_element(keyuse_array, 0, KEYUSE*);
+ KEYUSE *keyuse, *end_keyuse;
size_t elements= keyuse_array->elements;
TABLE *prev_table= 0;
- for (size_t i= 0; i < elements; i++, keyuse++)
+
+ DBUG_ASSERT(elements > 0);
+ /* The last element is an end marker */
+ DBUG_ASSERT(dynamic_element(keyuse_array, elements-1,
+ KEYUSE*)[0].table == 0);
+
+ for (keyuse= dynamic_element(keyuse_array, 0, KEYUSE*),
+ end_keyuse= keyuse + elements - 1;
+ keyuse < end_keyuse;
+ keyuse++)
{
- if (!keyuse->table)
- break;
+ DBUG_ASSERT(keyuse->table);
+
KEYUSE *first_table_keyuse= NULL;
table_map last_used_tables= 0;
uint count= 0;
uint keys= 0;
TABLE_LIST *derived= NULL;
+
if (keyuse->table != prev_table)
derived= keyuse->table->pos_in_table_list;
- while (derived && derived->is_materialized_derived())
+
+ if (!derived->is_materialized_derived())
+ continue;
+
+ for (;;)
{
if (keyuse->table != prev_table)
{
prev_table= keyuse->table;
while (keyuse->table == prev_table && keyuse->key != MAX_KEY)
- {
keyuse++;
- i++;
- }
if (keyuse->table != prev_table)
{
keyuse--;
- i--;
- derived= NULL;
- continue;
+ break;
}
first_table_keyuse= keyuse;
last_used_tables= keyuse->used_tables;
@@ -13198,14 +14109,12 @@ bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array)
}
count++;
keyuse++;
- i++;
if (keyuse->table != prev_table)
{
if (generate_derived_keys_for_table(first_table_keyuse, count, ++keys))
return TRUE;
keyuse--;
- i--;
- derived= NULL;
+ break;
}
}
}
@@ -13233,20 +14142,42 @@ void JOIN::drop_unused_derived_keys()
{
TABLE *tmp_tbl= tab->table;
- if (!tmp_tbl)
+ /*
+ Skip placeholders and already created tables (we cannot change keys
+ for created tables)
+ */
+ if (!tmp_tbl || tmp_tbl->is_created())
continue;
if (!tmp_tbl->pos_in_table_list->is_materialized_derived())
continue;
- if (tmp_tbl->max_keys > 1 && !tab->is_ref_for_hash_join())
- tmp_tbl->use_index(tab->ref.key);
- if (tmp_tbl->s->keys)
+
+ /*
+ tmp_tbl->max_keys is the number of keys pre-allocated in
+ TABLE::alloc_keys(). Can be 0 if alloc_keys() was not called.
+
+ tmp_tbl->s->keys is number of keys defined for the table.
+ Normally 0 or 1 (= unique key)
+ */
+
+ if (likely(tmp_tbl->s->keys) && tab->ref.key >= 0 &&
+ !tab->is_ref_for_hash_join())
{
- if (tab->ref.key >= 0 && tab->ref.key < MAX_KEY)
- tab->ref.key= 0;
- else
- tmp_tbl->s->keys= 0;
+ if (tmp_tbl->s->keys > 1)
+ {
+ /* remove all keys except the chosen one and unique keys */
+ tmp_tbl->use_index(tab->ref.key, &tab->keys);
+ }
+ /*
+ We dropped all keys except the chosen one and unique keys.
+ The choosen one is stored as the first key (number 0).
+ */
+ tab->ref.key= 0;
+ }
+ else if (tmp_tbl->s->keys)
+ {
+ /* The query cannot use keys, remove all non unique keys */
+ tmp_tbl->use_index(-1, &tab->keys);
}
- tab->keys= (key_map) (tmp_tbl->s->keys ? 1 : 0);
}
}
@@ -13356,6 +14287,7 @@ void set_join_cache_denial(JOIN_TAB *join_tab)
don't do join buffering for the first table in sjm nest.
*/
join_tab[-1].next_select= sub_select;
+ join_tab[-1].cached_pfs_batch_update= join_tab[-1].pfs_batch_update();
if (join_tab->type == JT_REF && join_tab->is_ref_for_hash_join())
{
join_tab->type= JT_ALL;
@@ -13676,7 +14608,6 @@ uint check_join_cache_usage(JOIN_TAB *tab,
uint table_index,
JOIN_TAB *prev_tab)
{
- Cost_estimate cost;
uint flags= 0;
ha_rows rows= 0;
uint bufsz= 4096;
@@ -13811,7 +14742,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
prev_cache= prev_tab->cache;
switch (tab->type) {
+ case JT_NEXT:
case JT_ALL:
+ case JT_RANGE:
if (cache_level == 1)
prev_cache= 0;
if ((tab->cache= new (root) JOIN_CACHE_BNL(join, tab, prev_cache)))
@@ -13834,6 +14767,8 @@ uint check_join_cache_usage(JOIN_TAB *tab,
if (!tab->is_ref_for_hash_join() && !no_bka_cache)
{
+ Cost_estimate cost;
+ cost.reset();
flags= HA_MRR_NO_NULL_ENDPOINTS | HA_MRR_SINGLE_POINT;
if (tab->table->covering_keys.is_set(tab->ref.key))
flags|= HA_MRR_INDEX_ONLY;
@@ -13892,7 +14827,7 @@ uint check_join_cache_usage(JOIN_TAB *tab,
}
no_join_cache:
- if (tab->type != JT_ALL && tab->is_ref_for_hash_join())
+ if (tab->type != JT_ALL && tab->type != JT_RANGE && tab->is_ref_for_hash_join())
{
tab->type= JT_ALL;
tab->ref.key_parts= 0;
@@ -13972,7 +14907,9 @@ restart:
case JT_EQ_REF:
case JT_REF:
case JT_REF_OR_NULL:
+ case JT_NEXT:
case JT_ALL:
+ case JT_RANGE:
tab->used_join_cache_level= check_join_cache_usage(tab, options,
no_jbuf_after,
idx,
@@ -14108,6 +15045,9 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
uint i;
DBUG_ENTER("make_join_readinfo");
+ Json_writer_object trace_wrapper(join->thd);
+ Json_writer_array trace_arr(join->thd, "make_join_readinfo");
+
bool statistics= MY_TEST(!(join->select_options & SELECT_DESCRIBE));
bool sorted= 1;
@@ -14137,18 +15077,21 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
if (tab->bush_root_tab && tab->bush_root_tab->bush_children->start == tab)
prev_tab= NULL;
- DBUG_ASSERT(tab->bush_children || tab->table == join->best_positions[i].table->table);
+ DBUG_ASSERT(tab->bush_children ||
+ tab->table == join->best_positions[i].table->table);
tab->partial_join_cardinality= join->best_positions[i].records_read *
- (prev_tab? prev_tab->partial_join_cardinality : 1);
+ (prev_tab ?
+ prev_tab->partial_join_cardinality : 1);
if (!tab->bush_children)
i++;
}
check_join_cache_usage_for_tables(join, options, no_jbuf_after);
-
+
JOIN_TAB *first_tab;
- for (tab= first_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ for (tab= first_tab= first_linear_tab(join,
+ WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
@@ -14174,10 +15117,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
*/
if (!(tab->bush_root_tab &&
tab->bush_root_tab->bush_children->end == tab + 1))
- {
- tab->next_select=sub_select; /* normal select */
- }
-
+ tab->next_select= sub_select; /* normal select */
if (tab->loosescan_match_tab)
{
@@ -14226,13 +15166,35 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
(!jcl || jcl > 4) && !tab->ref.is_access_triggered())
push_index_cond(tab, tab->ref.key);
break;
+ case JT_NEXT: // Index scan
+ DBUG_ASSERT(!(tab->select && tab->select->quick));
+ if (tab->use_quick == 2)
+ {
+ join->thd->set_status_no_good_index_used();
+ tab->read_first_record= join_init_quick_read_record;
+ if (statistics)
+ join->thd->inc_status_select_range_check();
+ }
+ else
+ {
+ tab->read_first_record= join_read_first;
+ if (statistics)
+ {
+ join->thd->inc_status_select_scan();
+ join->thd->query_plan_flags|= QPLAN_FULL_SCAN;
+ }
+ }
+ break;
case JT_ALL:
+ case JT_RANGE:
case JT_HASH:
+ {
+ bool have_quick_select= tab->select && tab->select->quick;
/*
If previous table use cache
If the incoming data set is already sorted don't use cache.
Also don't use cache if this is the first table in semi-join
- materialization nest.
+ materialization nest.
*/
/* These init changes read_record */
if (tab->use_quick == 2)
@@ -14265,7 +15227,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
}
else
{
- if (tab->select && tab->select->quick)
+ if (have_quick_select)
{
if (statistics)
join->thd->inc_status_select_full_range_join();
@@ -14282,41 +15244,27 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
}
if (!table->no_keyread)
{
- if (!(tab->select && tab->select->quick &&
- tab->select->quick->index != MAX_KEY && //not index_merge
- table->covering_keys.is_set(tab->select->quick->index)) &&
- (!table->covering_keys.is_clear_all() &&
- !(tab->select && tab->select->quick)))
+ if (!(have_quick_select &&
+ tab->select->quick->index != MAX_KEY && //not index_merge
+ table->covering_keys.is_set(tab->select->quick->index)) &&
+ (!table->covering_keys.is_clear_all() && ! have_quick_select))
{ // Only read index tree
if (tab->loosescan_match_tab)
tab->index= tab->loosescan_key;
else
- {
-#ifdef BAD_OPTIMIZATION
- /*
- It has turned out that the below change, while speeding things
- up for disk-bound loads, slows them down for cases when the data
- is in disk cache (see BUG#35850):
- See bug #26447: "Using the clustered index for a table scan
- is always faster than using a secondary index".
- */
- if (table->file->pk_is_clustering_key(table->s->primary_key))
- tab->index= table->s->primary_key;
- else
-#endif
- tab->index=find_shortest_key(table, & table->covering_keys);
- }
+ tab->index= tab->cached_covering_key;
tab->read_first_record= join_read_first;
/* Read with index_first / index_next */
- tab->type= tab->type == JT_ALL ? JT_NEXT : JT_HASH_NEXT;
+ tab->type= tab->type == JT_ALL ? JT_NEXT : JT_HASH_NEXT;
}
}
- if (tab->select && tab->select->quick &&
+ if (have_quick_select &&
tab->select->quick->index != MAX_KEY &&
!tab->table->covering_keys.is_set(tab->select->quick->index))
push_index_cond(tab, tab->select->quick->index);
}
break;
+ }
case JT_FT:
break;
/* purecov: begin deadcode */
@@ -14328,6 +15276,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
abort();
/* purecov: end */
}
+ tab->cached_pfs_batch_update= tab->pfs_batch_update();
DBUG_EXECUTE("where",
char buff[256];
@@ -14418,7 +15367,7 @@ bool error_if_full_join(JOIN *join)
for (JOIN_TAB *tab=first_top_level_tab(join, WITH_CONST_TABLES); tab;
tab= next_top_level_tab(join, tab))
{
- if (tab->type == JT_ALL && (!tab->select || !tab->select->quick))
+ if ((tab->type == JT_ALL || tab->type == JT_NEXT))
{
my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
ER_THD(join->thd,
@@ -14430,37 +15379,58 @@ bool error_if_full_join(JOIN *join)
}
-void JOIN_TAB::build_range_rowid_filter_if_needed()
+/**
+ build_range_rowid_filter()
+
+ Build range rowid filter. This function should only be called if
+ need_to_build_rowid_filter is true.
+*/
+
+void JOIN_TAB::build_range_rowid_filter()
{
- if (rowid_filter && !is_rowid_filter_built)
- {
- /**
- The same handler object (table->file) is used to build a filter
- and to perfom a primary table access (by the main query).
+ DBUG_ASSERT(need_to_build_rowid_filter && rowid_filter);
- To estimate the time for filter building tracker should be changed
- and after building of the filter has been finished it should be
- switched back to the previos tracker.
- */
- Exec_time_tracker *table_tracker= table->file->get_time_tracker();
- Rowid_filter_tracker *rowid_tracker= rowid_filter->get_tracker();
- table->file->set_time_tracker(rowid_tracker->get_time_tracker());
- rowid_tracker->start_tracking(join->thd);
- if (!rowid_filter->build())
- {
- is_rowid_filter_built= true;
- }
- else
- {
- delete rowid_filter;
- rowid_filter= 0;
- }
- rowid_tracker->stop_tracking(join->thd);
- table->file->set_time_tracker(table_tracker);
+ /**
+ The same handler object (table->file) is used to build a filter
+ and to perform a primary table access (by the main query).
+
+ To estimate the time for filter building, the tracker should be changed,
+ and after the filter has been built it should be switched back to the
+ previous tracker.
+ */
+
+ Exec_time_tracker *table_tracker= table->file->get_time_tracker();
+ Rowid_filter_tracker *rowid_tracker= rowid_filter->get_tracker();
+ table->file->set_time_tracker(rowid_tracker->get_time_tracker());
+ rowid_tracker->start_tracking(join->thd);
+
+ if (rowid_filter->build())
+ {
+ /* Failed building rowid filter */
+ clear_range_rowid_filter();
}
+ need_to_build_rowid_filter= false;
+ rowid_tracker->stop_tracking(join->thd);
+ table->file->set_time_tracker(table_tracker);
}
+/*
+ Clear used rowid filter
+
+ Note that rowid_filter is allocated on mem_root and not really freed!
+ Only the rowid data is freed.
+*/
+
+void JOIN_TAB::clear_range_rowid_filter()
+{
+ delete rowid_filter;
+ rowid_filter= 0;
+ need_to_build_rowid_filter= false;
+ range_rowid_filter_info= 0;
+}
+
/**
cleanup JOIN_TAB.
@@ -14481,10 +15451,7 @@ void JOIN_TAB::cleanup()
delete quick;
quick= 0;
if (rowid_filter)
- {
- delete rowid_filter;
- rowid_filter= 0;
- }
+ clear_range_rowid_filter();
if (cache)
{
cache->free();
@@ -14530,7 +15497,7 @@ void JOIN_TAB::cleanup()
end_read_record(&read_record);
tmp->jtbm_subselect->cleanup();
/*
- The above call freed the materializedd temptable. Set it to NULL so
+ The above call freed the materialized temptable. Set it to NULL so
that we don't attempt to touch it if JOIN_TAB::cleanup() is invoked
multiple times (it may be)
*/
@@ -14553,56 +15520,97 @@ void JOIN_TAB::cleanup()
/**
Estimate the time to get rows of the joined table
+
+ Updates found_records, records, cached_covering_key, read_time and
+ cached_scan_and_compare_time
*/
-double JOIN_TAB::scan_time()
+void JOIN_TAB::estimate_scan_time()
{
- double res;
+ THD *thd= join->thd;
+ handler *file= table->file;
+ double copy_cost;
+
+ cached_covering_key= MAX_KEY;
if (table->is_created())
{
if (table->is_filled_at_execution())
{
get_delayed_table_estimates(table, &records, &read_time,
- &startup_cost);
- found_records= records;
+ &startup_cost);
table->opt_range_condition_rows= records;
+ table->used_stat_records= records;
+ copy_cost= file->ROW_COPY_COST;
}
else
{
- found_records= records= table->stat_records();
- read_time= table->file->scan_time();
+ records= table->stat_records();
/*
table->opt_range_condition_rows has already been set to
table->file->stats.records
*/
+ DBUG_ASSERT(table->opt_range_condition_rows == records);
+
+ if (!table->covering_keys.is_clear_all() && ! table->no_keyread)
+ {
+ cached_covering_key= find_shortest_key(table, &table->covering_keys);
+ read_time= file->cost(file->ha_key_scan_time(cached_covering_key,
+ records));
+ copy_cost= 0; // included in ha_key_scan_time
+ }
+ else
+ {
+ read_time= file->cost(file->ha_scan_time(records));
+ copy_cost= 0;
+ }
}
- res= read_time;
}
else
{
- found_records= records=table->stat_records();
- read_time= found_records ? (double)found_records: 10.0;// TODO:fix this stub
- res= read_time;
+ /*
+ The following is the same as calling
+ TABLE_SHARE::update_optimizer_costs, but without locks
+ */
+ if (table->s->db_type() == heap_hton)
+ memcpy(&table->s->optimizer_costs, &heap_optimizer_costs,
+ sizeof(heap_optimizer_costs));
+ else
+ memcpy(&table->s->optimizer_costs, &tmp_table_optimizer_costs,
+ sizeof(tmp_table_optimizer_costs));
+ file->set_optimizer_costs(thd);
+ table->s->optimizer_costs_inited=1;
+
+ records= table->stat_records();
+ DBUG_ASSERT(table->opt_range_condition_rows == records);
+ // Needs fix..
+ read_time= file->cost(table->file->ha_scan_time(MY_MAX(records, 1000)));
+ copy_cost= table->s->optimizer_costs.row_copy_cost;
}
- return res;
+
+ found_records= records;
+ cached_scan_and_compare_time= (read_time + records *
+ (copy_cost + WHERE_COST_THD(thd)));
}
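A small sketch of the cached value computed above (not part of the patch; the per-row constants are placeholders, not the engine's real ROW_COPY_COST or WHERE_COST values): the raw scan cost plus, for every row, the cost of copying it and evaluating the WHERE clause:

  #include <cstdio>

  static double scan_and_compare_time(double read_time, double records)
  {
    const double row_copy_cost= 0.0001;   /* assumed per-row copy cost */
    const double where_cost= 0.00003;     /* assumed per-row WHERE cost */
    return read_time + records * (row_copy_cost + where_cost);
  }

  int main()
  {
    printf("%g\n", scan_and_compare_time(120.0, 100000.0));
    return 0;
  }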
/**
- Estimate the number of rows that a an access method will read from a table.
+ Estimate the number of rows that an access method will read from a table.
- @todo: why not use JOIN_TAB::found_records
+ @todo: why not use JOIN_TAB::found_records or JOIN_TAB::records_read
*/
-ha_rows JOIN_TAB::get_examined_rows()
+double JOIN_TAB::get_examined_rows()
{
double examined_rows;
SQL_SELECT *sel= filesort? filesort->select : this->select;
if (sel && sel->quick && use_quick != 2)
- examined_rows= (double)sel->quick->records;
- else if (type == JT_NEXT || type == JT_ALL ||
- type == JT_HASH || type ==JT_HASH_NEXT)
+ {
+ examined_rows= (double) sel->quick->records;
+ DBUG_ASSERT(examined_rows == sel->quick->records);
+ }
+ else if (type == JT_NEXT || type == JT_ALL || type == JT_RANGE ||
+ type == JT_HASH || type == JT_HASH_NEXT)
{
if (limit)
{
@@ -14627,11 +15635,11 @@ ha_rows JOIN_TAB::get_examined_rows()
}
}
else
- examined_rows= records_read;
+ examined_rows= records_init;
if (examined_rows >= (double) HA_ROWS_MAX)
- return HA_ROWS_MAX;
- return (ha_rows) examined_rows;
+ return (double) HA_ROWS_MAX;
+ return examined_rows;
}
@@ -14641,6 +15649,7 @@ ha_rows JOIN_TAB::get_examined_rows()
TODO: consider moving this together with join_tab_execution_startup
*/
+
bool JOIN_TAB::preread_init()
{
TABLE_LIST *derived= table->pos_in_table_list;
@@ -14682,7 +15691,31 @@ bool JOIN_TAB::preread_init()
}
-bool JOIN_TAB::pfs_batch_update(JOIN *join)
+/**
+ pfs_batch_update()
+
+ Check if the used table will do a lot of read calls in a row without
+ any intervening read calls to any other tables.
+
+ @return 0 No
+ @return 1 Yes
+
+ If yes, then the handler will be informed about this with the
+ start_psi_batch_mode() / end_psi_batch_mode() calls
+
+ This is currently used only to speed up performance schema code for
+ multiple reads.
+
+ In the future we may also inform the engine about this. The engine
+ could use this information to cache the used pages, keep blocks
+ locked in the page cache and similar things to speed up repeated
+ reads.
+
+ The return value of this function is cached in
+ JOIN_TAB::cached_pfs_batch_update
+*/
+
+bool JOIN_TAB::pfs_batch_update()
{
/*
Use PFS batch mode if
@@ -15017,7 +16050,6 @@ void JOIN::cleanup(bool full)
if (current_ref_ptrs != items0)
{
set_items_ref_array(items0);
- set_group_rpa= false;
}
DBUG_VOID_RETURN;
}
@@ -16527,7 +17559,7 @@ static COND *build_equal_items(JOIN *join, COND *cond,
table->on_expr= build_equal_items(join, table->on_expr, inherited,
nested_join_list, ignore_on_conds,
&table->cond_equal);
- if (unlikely(join->thd->trace_started()))
+ if (unlikely(thd->trace_started()))
{
const char *table_name;
if (table->nested_join)
@@ -17920,25 +18952,28 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
Do update counters for "pairs of brackets" that we've left (marked as
X,Y,Z in the above picture)
*/
- for (;next_emb && next_emb != join->emb_sjm_nest; next_emb= next_emb->embedding)
+ for (;next_emb && next_emb != join->emb_sjm_nest;
+ next_emb= next_emb->embedding)
{
if (!next_emb->sj_on_expr)
{
next_emb->nested_join->counter++;
if (next_emb->nested_join->counter == 1)
{
- /*
+ /*
next_emb is the first table inside a nested join we've "entered". In
- the picture above, we're looking at the 'X' bracket. Don't exit yet as
- X bracket might have Y pair bracket.
+ the picture above, we're looking at the 'X' bracket. Don't exit yet
+ as X bracket might have Y pair bracket.
*/
join->cur_embedding_map |= next_emb->nested_join->nj_map;
}
+ DBUG_ASSERT(next_emb->nested_join->n_tables >=
+ next_emb->nested_join->counter);
+
if (next_emb->nested_join->n_tables !=
next_emb->nested_join->counter)
break;
-
/*
We're currently at Y or Z-bracket as depicted in the above picture.
Mark that we've left it and continue walking up the brackets hierarchy.
@@ -18152,7 +19187,7 @@ table_map JOIN::get_allowed_nj_tables(uint idx)
first_alt TRUE <=> Use the LooseScan plan for the first_tab
no_jbuf_before Don't allow to use join buffering before this
table
- reopt_rec_count OUT New output record count
+ outer_rec_count OUT New output record count
reopt_cost OUT New join prefix cost
DESCRIPTION
@@ -18207,6 +19242,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
table_map save_cur_sj_inner_tables= join->cur_sj_inner_tables;
join->cur_sj_inner_tables= 0;
+ double inner_fanout= 1.0;
+
for (i= first_tab; i <= last_tab; i++)
{
JOIN_TAB *rs= join->positions[i].table;
@@ -18219,34 +19256,54 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
join->positions, i,
TRUE, rec_count,
&pos, &loose_scan_pos);
+ if ((i == first_tab && first_alt))
+ pos= loose_scan_pos;
}
else
pos= join->positions[i];
- if ((i == first_tab && first_alt))
- pos= loose_scan_pos;
-
reopt_remaining_tables &= ~rs->table->map;
- rec_count= COST_MULT(rec_count, pos.records_read);
cost= COST_ADD(cost, pos.read_time);
- cost= COST_ADD(cost, rec_count / TIME_FOR_COMPARE);
- //TODO: take into account join condition selectivity here
- double pushdown_cond_selectivity= 1.0;
- table_map real_table_bit= rs->table->map;
- if (join->thd->variables.optimizer_use_condition_selectivity > 1)
+
+ double records_out= pos.records_out;
+ /*
+    The (i != last_tab) check is here to mimic what
+ best_extension_by_limited_search() does: do not call
+ table_after_join_selectivity() for the join_tab where the semi-join
+ strategy is applied
+ */
+ if (i != last_tab &&
+ join->thd->variables.optimizer_use_condition_selectivity > 1)
{
- pushdown_cond_selectivity= table_cond_selectivity(join, i, rs,
- reopt_remaining_tables &
- ~real_table_bit);
+ table_map real_table_bit= rs->table->map;
+ double __attribute__((unused)) pushdown_cond_selectivity;
+ pushdown_cond_selectivity=
+ table_after_join_selectivity(join, i, rs,
+ reopt_remaining_tables &
+ ~real_table_bit, &records_out);
}
- (*outer_rec_count) *= pushdown_cond_selectivity;
- if (!rs->emb_sj_nest)
- *outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
+ rec_count= COST_MULT(rec_count, records_out);
+ *outer_rec_count= COST_MULT(*outer_rec_count, records_out);
+ if (rs->emb_sj_nest)
+ inner_fanout= COST_MULT(inner_fanout, records_out);
}
+
+ /* Discount the fanout produced by the subquery */
+ if (inner_fanout > 1.0)
+ *outer_rec_count /= inner_fanout;
+
join->cur_sj_inner_tables= save_cur_sj_inner_tables;
*reopt_cost= cost;
+ if (rec_count < *outer_rec_count)
+ {
+ /*
+      The tables inside the subquery produce a smaller fanout than the
+      outer tables. This can happen in edge cases.
+ */
+ *outer_rec_count= rec_count;
+ }
}
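
As a reading aid, and assuming hypothetical arrays records_out[] (per-table row
estimates) and inner[] (semi-join inner table flags), the fanout bookkeeping above
boils down to something like the sketch below:

    #include <cstddef>

    static double reopt_outer_fanout(const double *records_out, const bool *inner,
                                     std::size_t n_tables)
    {
      double outer= 1.0, inner_fanout= 1.0;
      for (std::size_t i= 0; i < n_tables; i++)
      {
        outer*= records_out[i];
        if (inner[i])                      /* table belongs to the semi-join nest */
          inner_fanout*= records_out[i];
      }
      if (inner_fanout > 1.0)              /* discount the fanout of the subquery */
        outer/= inner_fanout;
      return outer;
    }

The patch additionally clamps *outer_rec_count to rec_count for the edge case where
the subquery tables happen to produce the smaller fanout.
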
@@ -18279,8 +19336,11 @@ optimize_cond(JOIN *join, COND *conds,
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_cond(thd, "condition_processing");
- trace_cond.add("condition", join->conds == conds ? "WHERE" : "HAVING")
- .add("original_condition", conds);
+
+ if (unlikely(trace_cond.trace_started()))
+ trace_cond.
+ add("condition", join->conds == conds ? "WHERE" : "HAVING").
+ add("original_condition", conds);
Json_writer_array trace_steps(thd, "steps");
DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
@@ -18288,10 +19348,13 @@ optimize_cond(JOIN *join, COND *conds,
ignore_on_conds, cond_equal,
MY_TEST(flags & OPT_LINK_EQUAL_FIELDS));
DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
+
+ if (unlikely(thd->trace_started()))
{
Json_writer_object equal_prop_wrapper(thd);
- equal_prop_wrapper.add("transformation", "equality_propagation")
- .add("resulting_condition", conds);
+ equal_prop_wrapper.
+ add("transformation", "equality_propagation").
+ add("resulting_condition", conds);
}
/* change field = field to field = const for each found field = const */
@@ -18301,20 +19364,24 @@ optimize_cond(JOIN *join, COND *conds,
Remove all and-levels where CONST item != CONST item
*/
DBUG_EXECUTE("where",print_where(conds,"after const change", QT_ORDINARY););
+ if (unlikely(thd->trace_started()))
{
Json_writer_object const_prop_wrapper(thd);
- const_prop_wrapper.add("transformation", "constant_propagation")
- .add("resulting_condition", conds);
+ const_prop_wrapper.
+ add("transformation", "constant_propagation").
+ add("resulting_condition", conds);
}
conds= conds->remove_eq_conds(thd, cond_value, true);
if (conds && conds->type() == Item::COND_ITEM &&
((Item_cond*) conds)->functype() == Item_func::COND_AND_FUNC)
*cond_equal= &((Item_cond_and*) conds)->m_cond_equal;
+ if (unlikely(thd->trace_started()))
{
Json_writer_object cond_removal_wrapper(thd);
- cond_removal_wrapper.add("transformation", "trivial_condition_removal")
- .add("resulting_condition", conds);
+ cond_removal_wrapper.
+ add("transformation", "trivial_condition_removal").
+ add("resulting_condition", conds);
}
DBUG_EXECUTE("info",print_where(conds,"after remove", QT_ORDINARY););
}
@@ -18531,7 +19598,7 @@ bool cond_is_datetime_is_null(Item *cond)
=> SELECT * FROM t1 WHERE ((FALSE AND (a = 5)) OR
((b = 5) AND (a = 5))) AND
(b = 5) AND (a = 5)
- After this an additional call of remove_eq_conds() converts it to
+ After this an additional call of remove_eq_conds() converts it to
=> SELECT * FROM t1 WHERE (b = 5) AND (a = 5)
*/
@@ -18644,7 +19711,7 @@ Item_cond::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
else
{
if (new_item->type() == Item::COND_ITEM &&
- ((Item_cond*) new_item)->functype() == functype())
+ ((Item_cond*) new_item)->functype() == functype())
{
List<Item> *new_item_arg_list=
((Item_cond *) new_item)->argument_list();
@@ -19621,6 +20688,7 @@ TABLE *Create_tmp_table::start(THD *thd,
sizeof(*m_key_part_info)*(param->group_parts+1),
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
+ &param->rec_per_key, sizeof(ulong)*param->group_parts,
&tmpname, (uint) strlen(path)+1,
&m_group_buff, (m_group && ! m_using_unique_constraint ?
param->group_length : 0),
@@ -19949,7 +21017,7 @@ bool Create_tmp_table::finalize(THD *thd,
bool save_abort_on_warning;
uchar *pos;
uchar *null_flags;
- KEY *keyinfo;
+ KEY *keyinfo= param->keyinfo;
TMP_ENGINE_COLUMNDEF *recinfo;
TABLE_SHARE *share= table->s;
Copy_field *copy= param->copy_field;
@@ -20141,12 +21209,10 @@ bool Create_tmp_table::finalize(THD *thd,
set_if_smaller(share->max_rows, m_rows_limit);
param->end_write_records= m_rows_limit;
- keyinfo= param->keyinfo;
-
if (m_group)
{
DBUG_PRINT("info",("Creating group key in temporary table"));
- table->group= m_group; /* Table is grouped by key */
+ table->group= m_group; /* Table is grouped by key */
param->group_buff= m_group_buff;
share->keys=1;
share->uniques= MY_TEST(m_using_unique_constraint);
@@ -20156,15 +21222,18 @@ bool Create_tmp_table::finalize(THD *thd,
keyinfo->key_part= m_key_part_info;
keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
keyinfo->ext_key_flags= keyinfo->flags;
- keyinfo->usable_key_parts=keyinfo->user_defined_key_parts= param->group_parts;
+ keyinfo->usable_key_parts=keyinfo->user_defined_key_parts=
+ param->group_parts;
keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
+ share->ext_key_parts= share->key_parts= keyinfo->ext_key_parts;
keyinfo->key_length=0;
- keyinfo->rec_per_key=NULL;
+ keyinfo->rec_per_key= param->rec_per_key;
keyinfo->read_stats= NULL;
keyinfo->collected_stats= NULL;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= group_key;
+ keyinfo->comment.str= 0;
ORDER *cur_group= m_group;
for (; cur_group ; cur_group= cur_group->next, m_key_part_info++)
{
@@ -20265,6 +21334,7 @@ bool Create_tmp_table::finalize(THD *thd,
keyinfo->usable_key_parts= keyinfo->user_defined_key_parts;
table->distinct= 1;
share->keys= 1;
+ share->ext_key_parts= share->key_parts= keyinfo->ext_key_parts;
if (!(m_key_part_info= (KEY_PART_INFO*)
alloc_root(&table->mem_root,
keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO))))
@@ -20369,6 +21439,8 @@ bool Create_tmp_table::finalize(THD *thd,
m_key_part_info++;
}
}
+ if (share->keys)
+ keyinfo->index_flags= table->file->index_flags(0, 0, 1);
if (unlikely(thd->is_fatal_error)) // If end of memory
goto err; /* purecov: inspected */
@@ -20483,7 +21555,7 @@ TABLE *create_tmp_table_for_schema(THD *thd, TMP_TABLE_PARAM *param,
{
TABLE *table;
Create_tmp_table maker((ORDER *) NULL, false, false,
- select_options, HA_POS_ERROR);
+ select_options, HA_ROWS_MAX);
if (!(table= maker.start(thd, param, &table_alias)) ||
maker.add_schema_fields(thd, table, param, schema_table) ||
maker.finalize(thd, table, param, do_not_open, keep_row_order))
@@ -20663,7 +21735,6 @@ bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value)
return false;
}
-
bool open_tmp_table(TABLE *table)
{
int error;
@@ -20677,6 +21748,7 @@ bool open_tmp_table(TABLE *table)
}
table->db_stat= HA_OPEN_KEYFILE;
(void) table->file->extra(HA_EXTRA_QUICK); /* Faster */
+ table->file->set_optimizer_costs(table->in_use);
if (!table->is_created())
{
table->set_created();
@@ -20759,6 +21831,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
/* Can't create a key; Make a unique constraint instead of a key */
share->keys= 0;
+ share->key_parts= share->ext_key_parts= 0;
share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
@@ -20823,6 +21896,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
keydef.flag|= HA_NULL_ARE_EQUAL;
}
}
+ if (share->keys)
+ keyinfo->index_flags= table->file->index_flags(0, 0, 1);
}
bzero((char*) &create_info,sizeof(create_info));
create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
@@ -20953,6 +22028,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{
/* Can't create a key; Make a unique constraint instead of a key */
share->keys= 0;
+ share->key_parts= share->ext_key_parts= 0;
share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
@@ -21016,6 +22092,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
keydef.flag|= HA_NULL_ARE_EQUAL;
}
}
+ if (share->keys)
+ keyinfo->index_flags= table->file->index_flags(0, 0, 1);
}
MI_CREATE_INFO create_info;
bzero((char*) &create_info,sizeof(create_info));
@@ -21067,6 +22145,7 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
TABLE_SHARE share;
const char *save_proc_info;
int write_err= 0;
+ String tmp_alias;
DBUG_ENTER("create_internal_tmp_table_from_heap");
if (is_duplicate)
*is_duplicate= FALSE;
@@ -21159,9 +22238,18 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
plugin_unlock(0, table->s->db_plugin);
share.db_plugin= my_plugin_lock(0, share.db_plugin);
new_table.s= table->s; // Keep old share
+
+ /*
+    The following juggling with the alias has to be done as new_table.alias
+    may have been reallocated and we want to keep the original one.
+ */
+ tmp_alias.move(table->alias);
*table= new_table;
+ table->alias.move(tmp_alias);
+ new_table.alias.free();
+ /* Get the new share */
*table->s= share;
-
+
table->file->change_table_ptr(table, table->s);
table->use_all_columns();
if (save_proc_info)
@@ -21352,6 +22440,7 @@ do_select(JOIN *join, Procedure *procedure)
{
int rc= 0;
enum_nested_loop_state error= NESTED_LOOP_OK;
+ uint top_level_tables= join->exec_join_tab_cnt();
DBUG_ENTER("do_select");
if (join->pushdown_query)
@@ -21368,8 +22457,9 @@ do_select(JOIN *join, Procedure *procedure)
if (join->pushdown_query->store_data_in_temp_table)
{
- JOIN_TAB *last_tab= join->join_tab + join->exec_join_tab_cnt();
+ JOIN_TAB *last_tab= join->join_tab + top_level_tables;
last_tab->next_select= end_send;
+ last_tab->cached_pfs_batch_update= last_tab->pfs_batch_update();
enum_nested_loop_state state= last_tab->aggr->end_send();
if (state >= NESTED_LOOP_OK)
@@ -21386,6 +22476,7 @@ do_select(JOIN *join, Procedure *procedure)
join->procedure= procedure;
join->duplicate_rows= join->send_records=0;
+
if (join->only_const_tables() && !join->need_tmp)
{
Next_select_func end_select= setup_end_select_func(join, NULL);
@@ -21458,6 +22549,17 @@ do_select(JOIN *join, Procedure *procedure)
dbug_serve_apcs(join->thd, 1);
);
+ /*
+ We have to update the cached_pfs_batch_update as
+ join_tab->select_cond may have changed.
+
+    This can happen in case of GROUP BY where some subqueries are not
+    needed anymore. This is tested by main.ps.
+ */
+ if (top_level_tables)
+ join->join_tab[top_level_tables-1].cached_pfs_batch_update=
+ join->join_tab[top_level_tables-1].pfs_batch_update();
+
JOIN_TAB *join_tab= join->join_tab +
(join->tables_list ? join->const_tables : 0);
if (join->outer_ref_cond && !join->outer_ref_cond->val_int())
@@ -21849,6 +22951,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
enum_nested_loop_state
sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
{
+ int error;
+ enum_nested_loop_state rc;
DBUG_ENTER("sub_select");
if (join_tab->last_inner)
@@ -21868,10 +22972,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
}
join_tab->tracker->r_scans++;
- int error;
- enum_nested_loop_state rc= NESTED_LOOP_OK;
- READ_RECORD *info= &join_tab->read_record;
-
+ rc= NESTED_LOOP_OK;
for (SJ_TMP_TABLE *flush_dups_table= join_tab->flush_weedout_table;
flush_dups_table;
@@ -21883,9 +22984,21 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (!join_tab->preread_init_done && join_tab->preread_init())
DBUG_RETURN(NESTED_LOOP_ERROR);
- join_tab->build_range_rowid_filter_if_needed();
- if (join_tab->rowid_filter && join_tab->rowid_filter->is_empty())
- rc= NESTED_LOOP_NO_MORE_ROWS;
+ if (unlikely(join_tab->rowid_filter))
+ {
+ if (unlikely(join_tab->need_to_build_rowid_filter))
+ {
+ join_tab->build_range_rowid_filter();
+ /*
+ We have to check join_tab->rowid_filter again as the above
+ function may have cleared it in case of errors.
+ */
+ if (join_tab->rowid_filter && join_tab->rowid_filter->is_empty())
+ rc= NESTED_LOOP_NO_MORE_ROWS;
+ }
+ else if (join_tab->rowid_filter->is_empty())
+ rc= NESTED_LOOP_NO_MORE_ROWS;
+ }
join->return_tab= join_tab;
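
Purely illustrative, not patch content: the lazy build-and-check pattern used above,
with Filter standing in for the rowid filter object and build() assumed to return
false and clear the filter on failure:

    /* Filter is a stand-in for the rowid filter attached to the join tab */
    template <class Filter>
    static bool rows_filtered_out(Filter *&filter, bool &need_build)
    {
      if (!filter)
        return false;                 /* no filter, nothing is filtered out */
      if (need_build)
      {
        need_build= false;
        if (!filter->build())         /* assumed to clear 'filter' on error */
          filter= nullptr;
      }
      /* re-check: build() may have failed and removed the filter */
      return filter && filter->is_empty();
    }
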
@@ -21911,8 +23024,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (join_tab->loosescan_match_tab)
join_tab->loosescan_match_tab->found_match= FALSE;
- const bool pfs_batch_update= join_tab->pfs_batch_update(join);
- if (pfs_batch_update)
+  DBUG_ASSERT(join_tab->cached_pfs_batch_update ==
+              join_tab->pfs_batch_update());
+ if (join_tab->cached_pfs_batch_update)
join_tab->table->file->start_psi_batch_mode();
if (rc != NESTED_LOOP_NO_MORE_ROWS)
@@ -21923,11 +23036,9 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
rc= evaluate_join_record(join, join_tab, error);
}
- /*
- Note: psergey has added the 2nd part of the following condition; the
- change should probably be made in 5.1, too.
- */
bool skip_over= FALSE;
+ READ_RECORD *info= &join_tab->read_record;
+
while (rc == NESTED_LOOP_OK && join->return_tab >= join_tab)
{
if (join_tab->loosescan_match_tab &&
@@ -21962,15 +23073,21 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
rc= evaluate_join_record(join, join_tab, error);
}
- if (rc == NESTED_LOOP_NO_MORE_ROWS &&
- join_tab->last_inner && !join_tab->found)
- rc= evaluate_null_complemented_join_record(join, join_tab);
+ if (rc == NESTED_LOOP_NO_MORE_ROWS)
+ {
+ if (join_tab->last_inner && !join_tab->found)
+ {
+ rc= evaluate_null_complemented_join_record(join, join_tab);
+ if (rc == NESTED_LOOP_NO_MORE_ROWS)
+ rc= NESTED_LOOP_OK;
+ }
+ else
+ rc= NESTED_LOOP_OK;
+ }
- if (pfs_batch_update)
+ if (join_tab->cached_pfs_batch_update)
join_tab->table->file->end_psi_batch_mode();
- if (rc == NESTED_LOOP_NO_MORE_ROWS)
- rc= NESTED_LOOP_OK;
DBUG_RETURN(rc);
}
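
A hypothetical sketch of how the cached flag brackets a read loop with the handler's
start_psi_batch_mode()/end_psi_batch_mode() calls; Handler and read_one_row are
stand-ins, not MariaDB types:

    template <class Handler, class ReadFn>
    static int read_all_rows(Handler *file, bool cached_pfs_batch_update,
                             ReadFn read_one_row)
    {
      if (cached_pfs_batch_update)
        file->start_psi_batch_mode();  /* one PSI event covers many reads */

      int error= 0;
      while (!error)
        error= read_one_row();         /* stops on end-of-file or error */

      if (cached_pfs_batch_update)
        file->end_psi_batch_mode();
      return error;
    }
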
@@ -21997,7 +23114,6 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
ha_rows found_records=join->found_records;
COND *select_cond= join_tab->select_cond;
bool select_cond_result= TRUE;
-
DBUG_ENTER("evaluate_join_record");
DBUG_PRINT("enter",
("evaluate_join_record join: %p join_tab: %p "
@@ -22025,7 +23141,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
DBUG_RETURN(NESTED_LOOP_ERROR);
}
- if (!select_cond || select_cond_result)
+ if (select_cond_result)
{
/*
There is no select condition or the attached pushed down
@@ -22341,12 +23457,16 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
if (tab->table->pos_in_table_list->is_materialized_derived() &&
!tab->table->pos_in_table_list->fill_me)
{
+ DBUG_ASSERT(0);
//TODO: don't get here at all
- /* Skip materialized derived tables/views. */
+ /*
+      Skip materialized derived tables/views as their temporary table is not
+      opened yet.
+ */
DBUG_RETURN(0);
}
- else if (tab->table->pos_in_table_list->jtbm_subselect &&
- tab->table->pos_in_table_list->jtbm_subselect->is_jtbm_const_tab)
+ else if (tab->table->pos_in_table_list->jtbm_subselect &&
+ tab->table->pos_in_table_list->jtbm_subselect->is_jtbm_const_tab)
{
/* Row will not be found */
int res;
@@ -22362,7 +23482,7 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
{ // Info for DESCRIBE
tab->info= ET_CONST_ROW_NOT_FOUND;
/* Mark for EXPLAIN that the row was not found */
- pos->records_read=0.0;
+ pos->records_read= pos->records_out= 0.0;
pos->ref_depend_map= 0;
if (!table->pos_in_table_list->outer_join || error > 0)
DBUG_RETURN(error);
@@ -22375,20 +23495,12 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
}
else
{
- if (/*!table->file->key_read && */
- table->covering_keys.is_set(tab->ref.key) && !table->no_keyread &&
- (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
- {
- table->file->ha_start_keyread(tab->ref.key);
- tab->index= tab->ref.key;
- }
error=join_read_const(tab);
- table->file->ha_end_keyread();
if (unlikely(error))
{
tab->info= ET_UNIQUE_ROW_NOT_FOUND;
/* Mark for EXPLAIN that the row was not found */
- pos->records_read=0.0;
+ pos->records_read= pos->records_out= 0.0;
pos->ref_depend_map= 0;
if (!table->pos_in_table_list->outer_join || error > 0)
DBUG_RETURN(error);
@@ -22504,10 +23616,20 @@ join_read_const(JOIN_TAB *tab)
error=HA_ERR_KEY_NOT_FOUND;
else
{
- error= table->file->ha_index_read_idx_map(table->record[0],tab->ref.key,
- (uchar*) tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT);
+ handler *file= table->file;
+ if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread &&
+ (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
+ {
+ file->ha_start_keyread(tab->ref.key);
+ /* This is probably needed for analyze table */
+ tab->index= tab->ref.key;
+ }
+ error= file->
+ ha_index_read_idx_map(table->record[0],tab->ref.key,
+ (uchar*) tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT);
+ file->ha_end_keyread();
}
if (unlikely(error))
{
@@ -22851,6 +23973,7 @@ bool test_if_use_dynamic_range_scan(JOIN_TAB *join_tab)
return (join_tab->use_quick == 2 && test_if_quick_select(join_tab) > 0);
}
+
int join_init_read_record(JOIN_TAB *tab)
{
bool need_unpacking= FALSE;
@@ -22868,7 +23991,8 @@ int join_init_read_record(JOIN_TAB *tab)
need_unpacking= tbl ? tbl->is_sjm_scan_table() : FALSE;
}
- tab->build_range_rowid_filter_if_needed();
+ if (tab->need_to_build_rowid_filter)
+ tab->build_range_rowid_filter();
if (tab->filesort && tab->sort_table()) // Sort table.
return 1;
@@ -22900,10 +24024,29 @@ int join_init_read_record(JOIN_TAB *tab)
save_copy= tab->read_record.copy_field;
save_copy_end= tab->read_record.copy_field_end;
- if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
- tab->select, tab->filesort_result, 1, 1, FALSE))
- return 1;
+ /*
+ JT_NEXT means that we should use an index scan on index 'tab->index'
+    However, if filesort is set, the table was already sorted above and we
+    now have to retrieve the rows from the tmp file or by rnd_pos().
+    The !(tab->select && tab->select->quick) part of the condition is there
+    because if a quick select is attached we are in
+    "Range checked for each record" and it is better to let the normal
+    init_read_record() handle this case.
+ */
+ if (tab->type == JT_NEXT && ! tab->filesort &&
+ !(tab->select && tab->select->quick))
+ {
+ /* Used with covered_index scan or force index */
+ if (init_read_record_idx(&tab->read_record, tab->join->thd, tab->table,
+ 1, tab->index, 0))
+ return 1;
+ }
+ else
+ {
+ if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
+ tab->select, tab->filesort_result, 1, 1, FALSE))
+ return 1;
+ }
tab->read_record.copy_field= save_copy;
tab->read_record.copy_field_end= save_copy_end;
@@ -23266,11 +24409,8 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
List<Item> *fields= join_tab ? (join_tab-1)->fields : join->fields;
DBUG_ENTER("end_send_group");
- if (!join->items3.is_null() && !join->set_group_rpa)
- {
- join->set_group_rpa= true;
+ if (!join->items3.is_null() && join->current_ref_ptrs != join->items3)
join->set_items_ref_array(join->items3);
- }
if (!join->first_record || end_of_records ||
(idx=test_if_group_changed(join->group_fields)) >= 0)
@@ -23807,10 +24947,10 @@ bool test_if_ref(Item *root_cond, Item_field *left_item,Item *right_item)
@param cond Condition to analyze
@param tables Tables for which "current field values" are available
+                   (this includes used_table)
+ (may also include PSEUDO_TABLE_BITS, and may be zero)
@param used_table Table that we're extracting the condition for
- tables Tables for which "current field values" are available (this
- includes used_table)
- (may also include PSEUDO_TABLE_BITS, and may be zero)
@param join_tab_idx_arg
The index of the JOIN_TAB this Item is being extracted
for. MAX_TABLES if there is no corresponding JOIN_TAB.
@@ -24241,6 +25381,7 @@ static int test_if_order_by_key(JOIN *join,
uint key_parts;
bool have_pk_suffix= false;
uint pk= table->s->primary_key;
+ ORDER::enum_order keypart_order;
DBUG_ENTER("test_if_order_by_key");
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
@@ -24253,16 +25394,23 @@ static int test_if_order_by_key(JOIN *join,
for (; order ; order=order->next, const_key_parts>>=1)
{
Item_field *item_field= ((Item_field*) (*order->item)->real_item());
- Field *field= item_field->field;
int flag;
/*
Skip key parts that are constants in the WHERE clause.
These are already skipped in the ORDER BY by const_expression_in_where()
+ for top level queries.
*/
for (; const_key_parts & 1 ; const_key_parts>>= 1)
- key_part++;
-
+ {
+ if (item_field->contains(key_part->field))
+ {
+ /* Subquery with ORDER BY, continue with next field */
+ goto next_order_field;
+ }
+ key_part++;
+ }
+
/*
This check was in this function historically (although I think it's
better to check it outside of this function):
@@ -24287,36 +25435,27 @@ static int test_if_order_by_key(JOIN *join,
goto ok;
}
- if (key_part == key_part_end)
+ if (key_part == key_part_end ||
+ !key_part->field->part_of_sortkey.is_set(idx))
{
/*
- There are some items left in ORDER BY that we don't
+ There are some items left in ORDER BY that we don't have in the key
*/
DBUG_RETURN(0);
}
- if (key_part->field != field)
- {
- /*
- Check if there is a multiple equality that allows to infer that field
- and key_part->field are equal
- (see also: compute_part_of_sort_key_for_equals)
- */
- if (item_field->item_equal &&
- item_field->item_equal->contains(key_part->field))
- field= key_part->field;
- }
- if (key_part->field != field || !field->part_of_sortkey.is_set(idx))
+ if (!item_field->contains(key_part->field))
DBUG_RETURN(0);
- const ORDER::enum_order keypart_order=
- (key_part->key_part_flag & HA_REVERSE_SORT) ?
- ORDER::ORDER_DESC : ORDER::ORDER_ASC;
+ keypart_order= ((key_part->key_part_flag & HA_REVERSE_SORT) ?
+ ORDER::ORDER_DESC : ORDER::ORDER_ASC);
/* set flag to 1 if we can use read-next on key, else to -1 */
flag= (order->direction == keypart_order) ? 1 : -1;
if (reverse && flag != reverse)
DBUG_RETURN(0);
reverse=flag; // Remember if reverse
+
+next_order_field:
if (key_part < key_part_end)
key_part++;
}
@@ -24349,31 +25488,40 @@ ok:
@return
MAX_KEY no suitable key found
key index otherwise
+
+ @notes
+ We should not use keyread_time() as in the case of disk_read_cost= 0
+    all keys would be regarded as equal.
*/
uint find_shortest_key(TABLE *table, const key_map *usable_keys)
{
- double min_cost= DBL_MAX;
+ size_t min_length= INT_MAX32;
uint best= MAX_KEY;
- if (!usable_keys->is_clear_all())
+ uint possible_keys= usable_keys->bits_set();
+
+ if (possible_keys)
{
+ if (possible_keys == 1)
+ return usable_keys->find_first_bit();
+
for (uint nr=0; nr < table->s->keys ; nr++)
{
if (usable_keys->is_set(nr))
{
- double cost= table->file->keyread_time(nr, 1, table->file->records());
- if (cost < min_cost)
+ size_t length= table->key_storage_length(nr);
+ if (length < min_length)
{
- min_cost= cost;
- best=nr;
+ min_length= length;
+ best= nr;
}
- DBUG_ASSERT(best < MAX_KEY);
}
}
}
return best;
}
+
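
Illustrative only: the new selection rule amounts to picking the usable key with the
smallest storage length; key_length[] and usable[] below stand in for
TABLE::key_storage_length() and the usable_keys map:

    #include <cstddef>
    #include <limits>

    static unsigned find_shortest_usable_key(const std::size_t *key_length,
                                             const bool *usable, unsigned keys)
    {
      std::size_t min_length= std::numeric_limits<std::size_t>::max();
      unsigned best= keys;                      /* 'keys' means no key was found */
      for (unsigned nr= 0; nr < keys; nr++)
      {
        if (usable[nr] && key_length[nr] < min_length)
        {
          min_length= key_length[nr];
          best= nr;
        }
      }
      return best;
    }

The patch also short-circuits when only a single usable key is set in the map.
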
/**
Test if a second key is the subkey of the first one.
@@ -24749,7 +25897,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT)
{
/*
- we set ref_key=MAX_KEY instead of -1, because test_if_cheaper ordering
+ we set ref_key=MAX_KEY instead of -1, because test_if_cheaper_ordering()
assumes that "ref_key==-1" means doing full index scan.
(This is not very straightforward and we got into this situation for
historical reasons. Should be fixed at some point).
@@ -24836,7 +25984,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
goto use_filesort;
}
DBUG_ASSERT(tab->select->quick);
- tab->type= JT_ALL;
+ tab->type= JT_RANGE;
tab->ref.key= -1;
tab->ref.key_parts= 0;
tab->use_quick= 1;
@@ -24899,13 +26047,12 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (best_key < 0 ||
((select_limit >= table_records) &&
- (tab->type == JT_ALL &&
+ ((tab->type == JT_ALL || tab->type == JT_RANGE) &&
tab->join->table_count > tab->join->const_tables + 1) &&
- !(table->file->index_flags(best_key, 0, 1) & HA_CLUSTERED_INDEX)))
+ !table->is_clustering_key(best_key)))
goto use_filesort;
- if (select && // psergey: why doesn't this use a quick?
- table->opt_range_keys.is_set(best_key) && best_key != ref_key)
+ if (table->opt_range_keys.is_set(best_key) && best_key != ref_key)
{
key_map tmp_map;
tmp_map.clear_all(); // Force the creation of quick select
@@ -24938,10 +26085,10 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
}
order_direction= best_key_direction;
/*
- saved_best_key_parts is actual number of used keyparts found by the
- test_if_order_by_key function. It could differ from keyinfo->user_defined_key_parts,
- thus we have to restore it in case of desc order as it affects
- QUICK_SELECT_DESC behaviour.
+ saved_best_key_parts is actual number of used keyparts found by
+ the test_if_order_by_key function. It could differ from
+ keyinfo->user_defined_key_parts, thus we have to restore it in
+ case of desc order as it affects QUICK_SELECT_DESC behaviour.
*/
used_key_parts= (order_direction == -1) ?
saved_best_key_parts : best_key_parts;
@@ -24993,8 +26140,9 @@ check_reverse_order:
select->quick= 0; // Cleanup either reset to save_quick,
// or 'delete save_quick'
tab->index= best_key;
- tab->read_first_record= order_direction > 0 ?
- join_read_first:join_read_last;
+ tab->read_first_record= (order_direction > 0 ?
+ join_read_first:
+ join_read_last);
tab->type=JT_NEXT; // Read with index_first(), index_next()
/*
@@ -25003,11 +26151,7 @@ check_reverse_order:
*/
if (tab->rowid_filter &&
table->file->is_clustering_key(tab->index))
- {
- tab->range_rowid_filter_info= 0;
- delete tab->rowid_filter;
- tab->rowid_filter= 0;
- }
+ tab->clear_range_rowid_filter();
if (tab->pre_idx_push_select_cond)
{
@@ -25037,16 +26181,12 @@ check_reverse_order:
method is actually used.
*/
DBUG_ASSERT(tab->select->quick);
- tab->type=JT_ALL;
+ tab->type= JT_RANGE;
tab->use_quick=1;
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
- tab->range_rowid_filter_info= 0;
if (tab->rowid_filter)
- {
- delete tab->rowid_filter;
- tab->rowid_filter= 0;
- }
+ tab->clear_range_rowid_filter();
tab->read_first_record= join_init_read_record;
if (tab->is_using_loose_index_scan())
tab->join->tmp_table_param.precomputed_group_by= TRUE;
@@ -25254,6 +26394,7 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort)
}
else
{
+ fsort->own_select= false;
DBUG_ASSERT(tab->type == JT_REF || tab->type == JT_EQ_REF);
// Update ref value
if (unlikely(cp_buffer_from_ref(thd, table, &tab->ref) &&
@@ -26877,6 +28018,17 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
for (uint i= 0; (item= it++); i++)
{
Field *field;
+ /*
+ SUM_FUNC_ITEM will be replaced by the calculated value which is
+ stored in the temporary table.
+ The first part of the following test is for items that are expressions
+ with SUM_FUNC_ITEMS, like 'sum(a)+1'. In this case we keep the original
+    item, which contains an Item_ref that points to the SUM_FUNC_ITEM that
+    will be replaced with a pointer to the calculated value.
+    The second test is for window functions. Window functions contain
+ only pointers to Item_refs, which will be adjusted to point to the
+ temporary table.
+ */
enum Item::Type item_type= item->type();
if ((item->with_sum_func() && item_type != Item::SUM_FUNC_ITEM) ||
item->with_window_func())
@@ -27839,7 +28991,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
table_map prefix_tables,
bool distinct_arg, JOIN_TAB *first_top_tab)
{
- int quick_type;
+ int quick_type= -1;
CHARSET_INFO *cs= system_charset_info;
THD *thd= join->thd;
TABLE_LIST *table_list= table->pos_in_table_list;
@@ -27847,12 +28999,22 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
my_bool key_read;
char table_name_buffer[SAFE_NAME_LEN];
KEY *key_info= 0;
- uint key_len= 0;
- quick_type= -1;
+ uint key_len= 0, used_index= MAX_KEY;
+
+#ifdef NOT_YET
+ /*
+    It would be good to keep this condition up to date.
+    An alternative is to remove JOIN_TAB::cond_selectivity and use
+ TABLE::cond_selectivity everywhere
+ */
+ DBUG_ASSERT(cond_selectivity == table->cond_selectivity);
+#endif
explain_plan= eta;
eta->key.clear();
eta->quick_info= NULL;
+ eta->cost= join_read_time;
+ eta->loops= join_loops;
SQL_SELECT *tab_select;
/*
@@ -27873,6 +29035,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
// psergey-todo: data for filtering!
tracker= &eta->tracker;
jbuf_tracker= &eta->jbuf_tracker;
+ jbuf_unpack_tracker= &eta->jbuf_unpack_tracker;
/* Enable the table access time tracker only for "ANALYZE stmt" */
if (thd->lex->analyze_stmt)
@@ -27949,7 +29112,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
/* "type" column */
enum join_type tab_type= type;
- if ((type == JT_ALL || type == JT_HASH) &&
+ if ((type == JT_ALL || type == JT_RANGE || type == JT_HASH) &&
tab_select && tab_select->quick && use_quick != 2)
{
cur_quick= tab_select->quick;
@@ -27958,9 +29121,9 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
(quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT) ||
(quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) ||
(quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION))
- tab_type= type == JT_ALL ? JT_INDEX_MERGE : JT_HASH_INDEX_MERGE;
+ tab_type= type == JT_HASH ? JT_HASH_INDEX_MERGE : JT_INDEX_MERGE;
else
- tab_type= type == JT_ALL ? JT_RANGE : JT_HASH_RANGE;
+ tab_type= type == JT_HASH ? JT_HASH_RANGE : JT_RANGE;
}
eta->type= tab_type;
@@ -27991,11 +29154,13 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (tab_type == JT_NEXT)
{
+ used_index= index;
key_info= table->key_info+index;
key_len= key_info->key_length;
}
else if (ref.key_parts)
{
+ used_index= ref.key;
key_info= get_keyinfo_by_key_no(ref.key);
key_len= ref.key_length;
}
@@ -28045,6 +29210,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (tab_type == JT_HASH_NEXT) /* full index scan + hash join */
{
+ used_index= index;
eta->hash_next_key.set(thd->mem_root,
& table->key_info[index],
table->key_info[index].key_length);
@@ -28095,30 +29261,26 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
}
else
{
- ha_rows examined_rows= get_examined_rows();
+ double examined_rows= get_examined_rows();
eta->rows_set= true;
- eta->rows= examined_rows;
+ eta->rows= double_to_rows(examined_rows);
/* "filtered" */
float f= 0.0;
if (examined_rows)
{
- double pushdown_cond_selectivity= cond_selectivity;
- if (pushdown_cond_selectivity == 1.0)
- f= (float) (100.0 * records_read / examined_rows);
- else
- f= (float) (100.0 * pushdown_cond_selectivity);
+ f= (float) (100.0 * records_out / examined_rows);
+ set_if_smaller(f, 100.0);
}
- set_if_smaller(f, 100.0);
eta->filtered_set= true;
eta->filtered= f;
}
/* Build "Extra" field and save it */
key_read= table->file->keyread_enabled();
- if ((tab_type == JT_NEXT || tab_type == JT_CONST) &&
- table->covering_keys.is_set(index))
+ if ((tab_type == JT_NEXT || tab_type == JT_CONST) && used_index != MAX_KEY &&
+ table->covering_keys.is_set(used_index))
key_read=1;
if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT &&
!((QUICK_ROR_INTERSECT_SELECT*)cur_quick)->need_to_fetch_row)
@@ -28466,6 +29628,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
table_map used_tables=0;
join->select_lex->set_explain_type(true);
+ xpl_sel->cost= best_read;
xpl_sel->select_id= join->select_lex->select_number;
xpl_sel->select_type= join->select_lex->type;
xpl_sel->linkage= select_lex->get_linkage();
@@ -28504,9 +29667,9 @@ int JOIN::save_explain_data_intern(Explain_query *output,
continue;
}
-
Explain_table_access *eta= (new (output->mem_root)
- Explain_table_access(output->mem_root));
+ Explain_table_access(output->mem_root,
+ thd->lex->analyze_stmt));
if (!eta)
DBUG_RETURN(1);
@@ -29402,8 +30565,8 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
@param added_where An extra conjunct to the WHERE clause to reoptimize with
@param join_tables The set of tables to reoptimize
- @param save_to If != NULL, save here the state of the current query plan,
- otherwise reuse the existing query plan structures.
+ @param save_to If != NULL, save here the state of the current query
+ plan, otherwise reuse the existing query plan structures.
@notes
Given a query plan that was already optimized taking into account some WHERE
@@ -29420,7 +30583,8 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
@retval REOPT_NEW_PLAN there is a new plan.
@retval REOPT_OLD_PLAN no new improved plan was produced, use the old one.
- @retval REOPT_ERROR an irrecovarable error occurred during reoptimization.
+  @retval REOPT_ERROR     an irrecoverable error occurred during
+ reoptimization.
*/
JOIN::enum_reopt_result
@@ -29432,8 +30596,8 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
size_t org_keyuse_elements;
/* Re-run the REF optimizer to take into account the new conditions. */
- if (update_ref_and_keys(thd, &added_keyuse, join_tab, table_count, added_where,
- ~outer_join, select_lex, &sargables))
+ if (update_ref_and_keys(thd, &added_keyuse, join_tab, table_count,
+ added_where, ~outer_join, select_lex, &sargables))
{
delete_dynamic(&added_keyuse);
return REOPT_ERROR;
@@ -29487,7 +30651,7 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
return REOPT_ERROR;
/* Re-run the join optimizer to compute a new query plan. */
- if (choose_plan(this, join_tables))
+ if (choose_plan(this, join_tables, 0))
return REOPT_ERROR;
return REOPT_NEW_PLAN;
@@ -29529,139 +30693,132 @@ void JOIN::cache_const_exprs()
/*
- Get the cost of using index keynr to read #LIMIT matching rows
+ Get the cost of using index keynr to read #LIMIT matching rows by calling
+ ha_index_next() repeatedly (either with index scan, quick or 'ref')
@detail
- If there is a quick select, we try to use it.
- - if there is a ref(const) access, we try to use it, too.
- - quick and ref(const) use different cost formulas, so if both are possible
- we should make a cost-based choice.
-
- rows_limit is the number of rows we would need to read when using a full
- index scan. This is generally higher than the N from "LIMIT N" clause,
- because there's a WHERE condition (a part of which is used to construct a
- range access we are considering using here)
+    - If there is no quick select, return the full cost from
+ cost_for_index_read() (Doing a full scan with up to 'limit' records)
+
+ @param pos Result from best_access_path(). Is NULL for
+ single-table UPDATE/DELETE
+ @param table Table to be sorted
+ @param keynr Which index to use
+ @param rows_limit How many rows we want to read.
+                        This may differ from the original LIMIT as the
+                        caller has included fanouts and extra rows needed
+                        for handling GROUP BY.
+ @param rows_to_scan Number of rows to scan if there is no range.
+ @param read_cost Full cost, including cost of WHERE.
+  @param read_rows      Number of rows that need to be read
- @param tab JOIN_TAB with table access (is NULL for single-table
- UPDATE/DELETE)
- @param rows_limit See explanation above
- @param read_time OUT Cost of reading using quick or ref(const) access.
+ @return
+ 0 No possible range scan, cost is for index scan
+ 1 Range scan should be used
+ For the moment we don't take selectivity of the WHERE clause into
+ account when calculating the number of rows we have to read
+ (except what we get from quick select).
- @return
- true There was a possible quick or ref access, its cost is in the OUT
- parameters.
- false No quick or ref(const) possible (and so, the caller will attempt
- to use a full index scan on this index).
+  The cost is calculated in the code below; the selectivity factor is there
+  to take into account the increased number of rows that we have to read
+  to find LIMIT matching rows.
*/
-static bool get_range_limit_read_cost(const JOIN_TAB *tab,
- const TABLE *table,
- ha_rows table_records,
+static bool get_range_limit_read_cost(const POSITION *pos,
+ const TABLE *table,
uint keynr,
- ha_rows rows_limit,
- double *read_time)
+ ha_rows rows_limit_arg,
+ ha_rows rows_to_scan,
+ double *read_cost,
+ double *read_rows)
{
- bool res= false;
- /*
- We need to adjust the estimates if we had a quick select (or ref(const)) on
- index keynr.
- */
+ double rows_limit= rows2double(rows_limit_arg);
if (table->opt_range_keys.is_set(keynr))
{
/*
Start from quick select's rows and cost. These are always cheaper than
full index scan/cost.
*/
- double best_rows= (double) table->opt_range[keynr].rows;
- double best_cost= (double) table->opt_range[keynr].cost;
-
- /*
- Check if ref(const) access was possible on this index.
- */
- if (tab)
- {
- key_part_map map= 1;
- uint kp;
- /* Find how many key parts would be used by ref(const) */
- for (kp=0; kp < MAX_REF_PARTS; map=map << 1, kp++)
- {
- if (!(table->const_key_parts[keynr] & map))
- break;
- }
-
- if (kp > 0)
- {
- ha_rows ref_rows;
- /*
- Two possible cases:
- 1. ref(const) uses the same #key parts as range access.
- 2. ref(const) uses fewer key parts, becasue there is a
- range_cond(key_part+1).
- */
- if (kp == table->opt_range[keynr].key_parts)
- ref_rows= table->opt_range[keynr].rows;
- else
- ref_rows= (ha_rows) table->key_info[keynr].actual_rec_per_key(kp-1);
+ double best_rows, range_rows;
+ double range_cost= (double) table->opt_range[keynr].cost.fetch_cost();
+ best_rows= range_rows= (double) table->opt_range[keynr].rows;
- if (ref_rows > 0)
- {
- double tmp= cost_for_index_read(tab->join->thd, table, keynr,
- ref_rows,
- (ha_rows) tab->worst_seeks);
- if (tmp < best_cost)
- {
- best_cost= tmp;
- best_rows= (double)ref_rows;
- }
- }
- }
- }
-
- /*
- Consider an example:
-
- SELECT *
- FROM t1
- WHERE key1 BETWEEN 10 AND 20 AND col2='foo'
- ORDER BY key1 LIMIT 10
+ if (pos)
+ {
+ /*
+        Take into account the table selectivity as the number of accepted
+ rows for this table will be 'records_out'.
- If we were using a full index scan on key1, we would need to read this
- many rows to get 10 matches:
+ For example:
+ key1 BETWEEN 10 AND 1000 AND key2 BETWEEN 10 AND 20
- 10 / selectivity(key1 BETWEEN 10 AND 20 AND col2='foo')
+ If we are trying to do an ORDER BY on key1, we have to take into
+ account that using key2 we have to examine much fewer rows.
+ */
+ best_rows= pos->records_out; // Best rows with any key/keys
+ double cond_selectivity;
+ /*
+ We assign "double range_rows" from integer #rows a few lines above
+ so comparison with 0.0 makes sense
+ */
+ if (range_rows > 0.0)
+ cond_selectivity= best_rows / range_rows;
+ else
+ cond_selectivity= 1.0;
- This is the number we get in rows_limit.
- But we intend to use range access on key1. The rows returned by quick
- select will satisfy the range part of the condition,
- "key1 BETWEEN 10 and 20". We will still need to filter them with
- the remainder condition, (col2='foo').
+ DBUG_ASSERT(cond_selectivity <= 1.000000001);
+ set_if_smaller(cond_selectivity, 1.0);
- The selectivity of the range access is (best_rows/table_records). We need
- to discount it from the rows_limit:
- */
- double rows_limit_for_quick= rows_limit * (best_rows / table_records);
+ /*
+        We have to examine more rows in proportion to the selectivity of
+        the table
+ */
+ rows_limit= rows_limit / cond_selectivity;
+ }
- if (best_rows > rows_limit_for_quick)
+ if (best_rows > rows_limit)
{
/*
LIMIT clause specifies that we will need to read fewer records than
quick select will return. Assume that quick select's cost is
- proportional to the number of records we need to return (e.g. if we
+ proportional to the number of records we need to return (e.g. if we
only need 1/3rd of records, it will cost us 1/3rd of quick select's
read time)
*/
- best_cost *= rows_limit_for_quick / best_rows;
+ range_cost*= rows_limit / best_rows;
+ range_rows= rows_limit;
}
- *read_time= best_cost;
- res= true;
+ *read_cost= range_cost + range_rows * WHERE_COST_THD(table->in_use);
+ *read_rows= range_rows;
+ return 1;
}
- return res;
+
+ /*
+ Calculate the number of rows we have to check if we are
+ doing a full index scan (as a suitable range scan was not available).
+
+ We assume that each of the tested indexes is not correlated
+ with ref_key. Thus, to select first N records we have to scan
+ N/selectivity(ref_key) index entries.
+ selectivity(ref_key) = #scanned_records/#table_records =
+ refkey_rows_estimate/table_records.
+ In any case we can't select more than #table_records.
+ N/(refkey_rows_estimate/table_records) > table_records
+ <=> N > refkey_rows_estimate.
+ */
+ ALL_READ_COST cost= cost_for_index_read(table->in_use, table, keynr,
+ rows_to_scan, 0);
+ *read_cost= (table->file->cost(&cost) +
+ rows_to_scan * WHERE_COST_THD(table->in_use));
+ *read_rows= rows2double(rows_to_scan);
+ return 0;
}
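
Not part of the patch: a sketch of the two cost paths above, with where_cost standing
in for WHERE_COST_THD() and index_scan_cost for the cost_for_index_read() result:

    #include <algorithm>

    /* Range path: scale the range cost down to the rows we actually need */
    static double limited_range_cost(double range_cost, double range_rows,
                                     double records_out, double rows_limit,
                                     double where_cost)
    {
      double selectivity= range_rows > 0.0 ?
                          std::min(records_out / range_rows, 1.0) : 1.0;
      rows_limit/= selectivity;       /* more rows must be read to find */
                                      /* 'limit' accepted ones */
      if (range_rows > rows_limit)
      {
        range_cost*= rows_limit / range_rows;
        range_rows= rows_limit;
      }
      return range_cost + range_rows * where_cost;
    }

    /* Scan path: scan rows_to_scan index entries and test the WHERE on each */
    static double limited_index_scan_cost(double index_scan_cost,
                                          double rows_to_scan, double where_cost)
    {
      return index_scan_cost + rows_to_scan * where_cost;
    }

For the scan path, the caller estimates rows_to_scan as
select_limit * table_records / refkey_rows_estimate (capped at table_records);
for example, with 1,000,000 table rows, refkey_rows_estimate= 10,000 and LIMIT 100
this gives 100 * 1,000,000 / 10,000 = 10,000 index entries to scan.
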
/**
- Find a cheaper access key than a given @a key
+ Find a cheaper access key than a given key
@param tab NULL or JOIN_TAB of the accessed table
@param order Linked list of ORDER BY arguments
@@ -29678,7 +30835,8 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
@param [out] new_key Key number if success, otherwise undefined
@param [out] new_key_direction Return -1 (reverse) or +1 if success,
otherwise undefined
- @param [out] new_select_limit Return adjusted LIMIT
+ @param [out] new_select_limit Estimate of the number of rows we have
+                                  to read to find 'select_limit' rows.
@param [out] new_used_key_parts NULL by default, otherwise return number
of new_key prefix columns if success
or undefined if the function fails
@@ -29709,25 +30867,41 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
It may be the case if ORDER/GROUP BY is used with LIMIT.
*/
ha_rows best_select_limit= HA_POS_ERROR;
- JOIN *join= tab ? tab->join : NULL;
+ JOIN *join;
uint nr;
key_map keys;
- uint best_key_parts= 0;
int best_key_direction= 0;
- ha_rows best_records= 0;
- double read_time;
+ double read_time, filesort_cost;
+ enum sort_type filesort_type;
int best_key= -1;
- bool is_best_covering= FALSE;
- double fanout= 1;
+ double fanout;
ha_rows table_records= table->stat_records();
- bool group= join && join->group && order == join->group_list;
- ha_rows refkey_rows_estimate= table->opt_range_condition_rows;
+ bool group;
const bool has_limit= (select_limit_arg != HA_POS_ERROR);
- THD* thd= join ? join->thd : table->in_use;
-
+ THD *thd= table->in_use;
+ POSITION *position;
+ ha_rows rows_estimate, refkey_rows_estimate;
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_cheaper_ordering(
thd, "reconsidering_access_paths_for_index_ordering");
+
+ if (tab)
+ {
+ join= tab->join;
+    position= &join->best_positions[tab - join->join_tab];
+    group= join->group && order == join->group_list;
+ /* Take into account that records_out can be < 1.0 in case of GROUP BY */
+ rows_estimate= double_to_rows(position->records_out+0.5);
+ set_if_bigger(rows_estimate, 1);
+ refkey_rows_estimate= rows_estimate;
+ }
+ else
+ {
+ join= NULL;
+ position= 0;
+ refkey_rows_estimate= rows_estimate= table_records;
+ group= 0;
+ }
trace_cheaper_ordering.add("clause", group ? "GROUP BY" : "ORDER BY");
/*
@@ -29753,26 +30927,32 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
else
keys= usable_keys;
- if (join)
+
+ if (join) // True if SELECT
{
- uint tablenr= (uint)(tab - join->join_tab);
- read_time= join->best_positions[tablenr].read_time;
- for (uint i= tablenr+1; i < join->table_count; i++)
+ uint nr= (uint) (tab - join->join_tab);
+ fanout= 1.0;
+ if (nr != join->table_count - 1) // If not last table
+ fanout= (join->join_record_count / position->records_out);
+ else
{
- fanout*= join->best_positions[i].records_read; // fanout is always >= 1
- // But selectivity is =< 1 :
- fanout*= join->best_positions[i].cond_selectivity;
+ /* Only one table. Limit cannot be bigger than table_records */
+ set_if_smaller(select_limit_arg, table_records);
}
+ read_time= position->read_time;
}
else
- read_time= table->file->scan_time();
-
- trace_cheaper_ordering.add("fanout", fanout);
- /*
- TODO: add cost of sorting here.
- */
- read_time += COST_EPS;
- trace_cheaper_ordering.add("read_time", read_time);
+ {
+ /* Probably an update or delete. Assume we will do a full table scan */
+ fanout= 1.0;
+    read_time=
+      table->file->cost(table->file->ha_scan_and_compare_time(rows_estimate));
+ set_if_smaller(select_limit_arg, table_records);
+ }
+
+ filesort_cost= cost_of_filesort(table, order, rows_estimate,
+ select_limit_arg, &filesort_type);
+ read_time+= filesort_cost;
+
/*
Calculate the selectivity of the ref_key for REF_ACCESS. For
RANGE_ACCESS we use table->opt_range_condition_rows.
@@ -29797,18 +30977,35 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
set_if_bigger(refkey_rows_estimate, 1);
}
- if (tab)
- trace_cheaper_ordering.add_table_name(tab);
- else
- trace_cheaper_ordering.add_table_name(table);
- trace_cheaper_ordering.add("rows_estimation", refkey_rows_estimate);
+ if (unlikely(thd->trace_started()))
+ {
+ if (tab)
+ trace_cheaper_ordering.add_table_name(tab);
+ else
+ trace_cheaper_ordering.add_table_name(table);
+ trace_cheaper_ordering.
+ add("rows_estimation", rows_estimate).
+ add("filesort_cost", filesort_cost).
+ add("read_cost", read_time).
+ add("filesort_type", filesort_names[filesort_type].str).
+ add("fanout", fanout);
+ }
+
+ /*
+ Force using an index for sorting if there was no ref key
+ and FORCE INDEX was used.
+ */
+ if (table->force_index && ref_key < 0)
+ read_time= DBL_MAX;
Json_writer_array possible_keys(thd,"possible_keys");
for (nr=0; nr < table->s->keys ; nr++)
{
int direction;
ha_rows select_limit= select_limit_arg;
+ ha_rows estimated_rows_to_scan;
uint used_key_parts= 0;
+ double range_cost, range_rows;
Json_writer_object possible_key(thd);
possible_key.add("index", table->key_info[nr].name);
@@ -29826,9 +31023,8 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
possible_key.add("can_resolve_order", true);
possible_key.add("direction", direction);
bool is_covering= (table->covering_keys.is_set(nr) ||
- (table->file->index_flags(nr, 0, 1) &
- HA_CLUSTERED_INDEX));
- /*
+ table->is_clustering_key(nr));
+ /*
Don't use an index scan with ORDER BY without limit.
For GROUP BY without limit always use index scan
if there is a suitable index.
@@ -29838,14 +31034,11 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
queries too.
*/
if (is_covering ||
- select_limit != HA_POS_ERROR ||
+ has_limit ||
(ref_key < 0 && (group || table->force_index)))
{
double rec_per_key;
- double index_scan_time;
KEY *keyinfo= table->key_info+nr;
- if (select_limit == HA_POS_ERROR)
- select_limit= table_records;
if (group)
{
/*
@@ -29863,6 +31056,13 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
/* Take into account the selectivity of the used pk prefix */
if (used_pk_parts)
{
+ /*
+            TODO: This code needs to be tested with a debugger
+            - Why set rec_per_key to 1 if we don't have primary key data
+              or the full key is used?
+            - If used_pk_parts == 1, we don't take into account that
+              the first primary key part could be part of the current key.
+ */
KEY *pkinfo=tab->table->key_info+table->s->primary_key;
/*
If the values of of records per key for the prefixes
@@ -29894,7 +31094,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
rec_per_key/= pkinfo->actual_rec_per_key(i);
}
}
- }
+ }
}
set_if_bigger(rec_per_key, 1);
/*
@@ -29918,150 +31118,76 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
and as result we'll choose an index scan when using ref/range
access + filesort will be cheaper.
*/
- select_limit= (ha_rows) (select_limit < fanout ?
- 1 : select_limit/fanout);
-
- /*
- refkey_rows_estimate is E(#rows) produced by the table access
- strategy that was picked without regard to ORDER BY ... LIMIT.
-
- It will be used as the source of selectivity data.
- Use table->cond_selectivity as a better estimate which includes
- condition selectivity too.
- */
- {
- // we use MIN(...), because "Using LooseScan" queries have
- // cond_selectivity=1 while refkey_rows_estimate has a better
- // estimate.
- refkey_rows_estimate= MY_MIN(refkey_rows_estimate,
- ha_rows(table_records *
- table->cond_selectivity));
- }
-
- /*
- We assume that each of the tested indexes is not correlated
- with ref_key. Thus, to select first N records we have to scan
- N/selectivity(ref_key) index entries.
- selectivity(ref_key) = #scanned_records/#table_records =
- refkey_rows_estimate/table_records.
- In any case we can't select more than #table_records.
- N/(refkey_rows_estimate/table_records) > table_records
- <=> N > refkey_rows_estimate.
- */
+ select_limit= double_to_rows(select_limit/fanout);
+ set_if_bigger(select_limit, 1);
if (select_limit > refkey_rows_estimate)
- select_limit= table_records;
+ estimated_rows_to_scan= table_records;
else
- select_limit= (ha_rows) (select_limit *
- (double) table_records /
- refkey_rows_estimate);
- possible_key.add("updated_limit", select_limit);
- rec_per_key= keyinfo->actual_rec_per_key(keyinfo->user_defined_key_parts-1);
- set_if_bigger(rec_per_key, 1);
- /*
- Here we take into account the fact that rows are
- accessed in sequences rec_per_key records in each.
- Rows in such a sequence are supposed to be ordered
- by rowid/primary key. When reading the data
- in a sequence we'll touch not more pages than the
- table file contains.
- TODO. Use the formula for a disk sweep sequential access
- to calculate the cost of accessing data rows for one
- index entry.
- */
- index_scan_time= select_limit/rec_per_key *
- MY_MIN(rec_per_key, table->file->scan_time());
- double range_scan_time;
- if (get_range_limit_read_cost(tab, table, table_records, nr,
- select_limit, &range_scan_time))
+ estimated_rows_to_scan= (ha_rows) (select_limit *
+ (double) table_records /
+ (double) refkey_rows_estimate);
+
+ bool range_scan= get_range_limit_read_cost(tab ? position : 0,
+ table,
+ nr,
+ select_limit,
+ estimated_rows_to_scan,
+ &range_cost,
+ &range_rows);
+ if (unlikely(possible_key.trace_started()))
{
- possible_key.add("range_scan_time", range_scan_time);
- if (range_scan_time < index_scan_time)
- index_scan_time= range_scan_time;
+ possible_key
+ .add("rows_to_examine", range_rows)
+ .add("range_scan", range_scan)
+ .add("scan_cost", range_cost);
}
- possible_key.add("index_scan_time", index_scan_time);
- if ((ref_key < 0 && (group || table->force_index || is_covering)) ||
- index_scan_time < read_time)
+ /*
+        We will try to use the key if:
+        - there is no ref key and no usable key has yet been found and
+          there is either a group by or a FORCE_INDEX
+        - the new cost is better than read_time
+ */
+ if (range_cost < read_time)
{
- ha_rows quick_records= table_records;
- ha_rows refkey_select_limit= (ref_key >= 0 &&
- !is_hash_join_key_no(ref_key) &&
- table->covering_keys.is_set(ref_key)) ?
- refkey_rows_estimate :
- HA_POS_ERROR;
- if (is_best_covering && !is_covering)
- {
- possible_key.add("chosen", false);
- possible_key.add("cause", "covering index already found");
- continue;
- }
-
- if (is_covering && refkey_select_limit < select_limit)
- {
- possible_key.add("chosen", false);
- possible_key.add("cause", "ref estimates better");
- continue;
- }
- if (table->opt_range_keys.is_set(nr))
- quick_records= table->opt_range[nr].rows;
- possible_key.add("records", quick_records);
- if (best_key < 0 ||
- (select_limit <= MY_MIN(quick_records,best_records) ?
- keyinfo->user_defined_key_parts < best_key_parts :
- quick_records < best_records) ||
- (!is_best_covering && is_covering))
- {
- possible_key.add("chosen", true);
- best_key= nr;
- best_key_parts= keyinfo->user_defined_key_parts;
- if (saved_best_key_parts)
- *saved_best_key_parts= used_key_parts;
- best_records= quick_records;
- is_best_covering= is_covering;
- best_key_direction= direction;
- best_select_limit= select_limit;
- }
- else
- {
- char const *cause;
- possible_key.add("chosen", false);
- if (is_covering)
- cause= "covering index already found";
- else
- {
- if (select_limit <= MY_MIN(quick_records,best_records))
- cause= "keyparts greater than the current best keyparts";
- else
- cause= "rows estimation greater";
- }
- possible_key.add("cause", cause);
- }
+ read_time= range_cost;
+ possible_key.add("chosen", true);
+ best_key= nr;
+ if (saved_best_key_parts)
+ *saved_best_key_parts= used_key_parts;
+ if (new_used_key_parts)
+ *new_used_key_parts= keyinfo->user_defined_key_parts;
+ best_key_direction= direction;
+ best_select_limit= estimated_rows_to_scan;
}
- else
+ else if (unlikely(possible_key.trace_started()))
{
- possible_key.add("usable", false);
- possible_key.add("cause", "cost");
+ possible_key
+ .add("usable", false)
+ .add("cause", "cost");
}
}
- else
+ else if (unlikely(possible_key.trace_started()))
{
possible_key.add("usable", false);
if (!group && select_limit == HA_POS_ERROR)
possible_key.add("cause", "order by without limit");
}
}
- else
+ else if (unlikely(possible_key.trace_started()))
{
if (keys.is_set(nr))
{
- possible_key.add("can_resolve_order", false);
- possible_key.add("cause", "order can not be resolved by key");
+ possible_key.
+ add("can_resolve_order", false).
+ add("cause", "order can not be resolved by key");
}
else
{
- possible_key.add("can_resolve_order", false);
- possible_key.add("cause", "not usable index for the query");
+ possible_key.
+ add("can_resolve_order", false).
+ add("cause", "not usable index for the query");
}
}
}
@@ -30072,8 +31198,6 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
*new_key= best_key;
*new_key_direction= best_key_direction;
*new_select_limit= has_limit ? best_select_limit : table_records;
- if (new_used_key_parts != NULL)
- *new_used_key_parts= best_key_parts;
DBUG_RETURN(TRUE);
}
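
A hypothetical sketch of the simplified decision rule above: an index is picked for
ordering only when its (possibly LIMIT-scaled) scan cost beats the current cost,
which already includes the filesort cost; all names below are stand-ins:

    /*
      key_scan_cost[nr] corresponds to the cost computed by
      get_range_limit_read_cost(); current_cost is "access + filesort".
    */
    static int choose_ordering_index(const double *key_scan_cost,
                                     const bool *usable, unsigned keys,
                                     double current_cost)
    {
      int best_key= -1;                       /* -1: keep the filesort plan */
      for (unsigned nr= 0; nr < keys; nr++)
      {
        if (usable[nr] && key_scan_cost[nr] < current_cost)
        {
          current_cost= key_scan_cost[nr];
          best_key= (int) nr;
        }
      }
      return best_key;
    }
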
@@ -30113,7 +31237,7 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
if (select && select->quick)
return select->quick->index; // index or MAX_KEY, use quick select as is
else
- return table->file->key_used_on_scan; // MAX_KEY or index for some engines
+ return table->file->key_used_on_scan; // MAX_KEY or index for some engine
}
if (!is_simple_order(order)) // just to cut further expensive checks
@@ -30161,11 +31285,13 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
DBUG_ASSERT(0);
}
else if (limit != HA_POS_ERROR)
- { // check if some index scan & LIMIT is more efficient than filesort
+ {
+ // check if some index scan & LIMIT is more efficient than filesort
/*
- Update opt_range_condition_rows since single table UPDATE/DELETE procedures
- don't call make_join_statistics() and leave this variable uninitialized.
+ Update opt_range_condition_rows since single table UPDATE/DELETE
+ procedures don't call make_join_statistics() and leave this
+ variable uninitialized.
*/
table->opt_range_condition_rows= table->stat_records();
@@ -30741,20 +31867,22 @@ bool build_notnull_conds_for_range_scans(JOIN *join, Item *cond,
@brief
Build not null conditions for inner nest tables of an outer join
- @param join the join for whose table nest not null conditions are to be built
+ @param join the join for whose table nest not null conditions are to be
+ built
@param nest_tbl the nest of the inner tables of an outer join
@details
- The function assumes that nest_tbl is the nest of the inner tables of an
- outer join and so an ON expression for this outer join is attached to
- nest_tbl.
- The function selects the tables of the nest_tbl that are not inner tables of
- embedded outer joins and then it calls build_notnull_conds_for_range_scans()
- for nest_tbl->on_expr and the bitmap for the selected tables. This call
- finds all fields belonging to the selected tables whose null-rejectedness
- can be inferred from the null-rejectedness of nest_tbl->on_expr. After this
- the function recursively finds all null_rejected fields for the remaining
- tables from the nest of nest_tbl.
+ The function assumes that nest_tbl is the nest of the inner tables
+ of an outer join and so an ON expression for this outer join is
+ attached to nest_tbl.
+ The function selects the tables of the nest_tbl that are not inner
+ tables of embedded outer joins and then it calls
+ build_notnull_conds_for_range_scans() for nest_tbl->on_expr and
+ the bitmap for the selected tables. This call finds all fields
+ belonging to the selected tables whose null-rejectedness can be
+ inferred from the null-rejectedness of nest_tbl->on_expr. After
+ this the function recursively finds all null_rejected fields for
+ the remaining tables from the nest of nest_tbl.
*/
static
@@ -30820,6 +31948,7 @@ void JOIN::init_join_cache_and_keyread()
break;
case JT_HASH:
case JT_ALL:
+ case JT_RANGE:
SQL_SELECT *select;
select= tab->select ? tab->select :
(tab->filesort ? tab->filesort->select : NULL);
@@ -30844,7 +31973,8 @@ void JOIN::init_join_cache_and_keyread()
/* purecov: end */
}
- if (table->file->keyread_enabled())
+ if (table->file->keyread_enabled() &&
+ !table->is_clustering_key(table->file->keyread))
{
/*
Here we set the read_set bitmap for all covering keys
@@ -30879,8 +32009,7 @@ void JOIN::init_join_cache_and_keyread()
c, which is not a problem as we read all the columns from the index
tuple.
*/
- if (!(table->file->index_flags(table->file->keyread, 0, 1) & HA_CLUSTERED_INDEX))
- table->mark_index_columns(table->file->keyread, table->read_set);
+ table->mark_index_columns(table->file->keyread, table->read_set);
}
if (tab->cache && tab->cache->init(select_options & SELECT_DESCRIBE))
revise_cache_usage(tab);
@@ -31212,7 +32341,6 @@ bool JOIN::transform_all_conds_and_on_exprs_in_join_list(
return false;
}
-
/**
@} (end of group Query_Optimizer)
*/
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 5b5fb8158ca..f908484444b 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -309,11 +309,7 @@ typedef struct st_join_table {
Table_access_tracker *tracker;
Table_access_tracker *jbuf_tracker;
- /*
- Bitmap of TAB_INFO_* bits that encodes special line for EXPLAIN 'Extra'
- column, or 0 if there is no info.
- */
- uint packed_info;
+ Time_and_counter_tracker *jbuf_unpack_tracker;
// READ_RECORD::Setup_func materialize_table;
READ_RECORD::Setup_func read_first_record;
@@ -326,7 +322,6 @@ typedef struct st_join_table {
*/
READ_RECORD::Setup_func save_read_first_record;/* to save read_first_record */
READ_RECORD::Read_func save_read_record;/* to save read_record.read_record */
- double worst_seeks;
key_map const_keys; /**< Keys with constant part */
key_map checked_keys; /**< Keys checked in find_best */
key_map needed_reg;
@@ -346,9 +341,22 @@ typedef struct st_join_table {
*/
double read_time;
+ /* Copy of POSITION::records_init, set by get_best_combination() */
+ double records_init;
+
/* Copy of POSITION::records_read, set by get_best_combination() */
double records_read;
-
+
+ /* Copy of POSITION::records_out, set by get_best_combination() */
+ double records_out;
+
+ /*
+ Copy of POSITION::read_time, set by get_best_combination(). The cost of
+ accessing the table in course of the join execution.
+ */
+ double join_read_time;
+ double join_loops;
+
/* The selectivity of the conditions that can be pushed to the table */
double cond_selectivity;
@@ -357,7 +365,32 @@ typedef struct st_join_table {
double partial_join_cardinality;
- table_map dependent,key_dependent;
+ /* set by estimate_scan_time() */
+ double cached_scan_and_compare_time;
+ double cached_forced_index_cost;
+
+ /*
+ dependent is a map of the tables that must be read before the current one.
+ Used for example with STRAIGHT_JOIN or outer joins.
+ */
+ table_map dependent;
+ /*
+ key_dependent is 'dependent' extended with the tables that are used in a
+ comparison with a key field in a simple expression. See add_key_field().
+ It is only used to prune searches in best_extension_by_limited_search().
+ */
+ table_map key_dependent;
+ /*
+ Tables that have an expression in their attached condition clause that
+ depends on this table.
+ */
+ table_map related_tables;
+
+ /*
+ Bitmap of TAB_INFO_* bits that encodes special line for EXPLAIN 'Extra'
+ column, or 0 if there is no info.
+ */
+ uint packed_info;
/*
This is set for embedded sub queries. It contains the table map of
the outer expression, like 'A' in the following expression:
@@ -377,17 +410,21 @@ typedef struct st_join_table {
uint index;
uint status; ///< Save status for cache
uint used_fields;
+ uint cached_covering_key; // Set by estimate_scan_time()
ulong used_fieldlength;
ulong max_used_fieldlength;
uint used_blobs;
uint used_null_fields;
uint used_uneven_bit_fields;
- enum join_type type;
+ uint cached_forced_index;
+ enum join_type type, cached_forced_index_type;
/* If first key part is used for any key in 'key_dependent' */
bool key_start_dependent;
bool cached_eq_ref_table,eq_ref_table;
bool shortcut_for_distinct;
bool sorted;
+ bool cached_pfs_batch_update;
+
/*
If it's not 0 the number stored this field indicates that the index
scan has been chosen to access the table data and we expect to scan
@@ -536,10 +573,11 @@ typedef struct st_join_table {
Range_rowid_filter_cost_info *range_rowid_filter_info;
/* Rowid filter to be used when joining this join table */
Rowid_filter *rowid_filter;
- /* Becomes true just after the used range filter has been built / filled */
- bool is_rowid_filter_built;
+ /* True if the plan requires a rowid filter and it's not built yet */
+ bool need_to_build_rowid_filter;
- void build_range_rowid_filter_if_needed();
+ void build_range_rowid_filter();
+ void clear_range_rowid_filter();
void cleanup();
inline bool is_using_loose_index_scan()
@@ -646,11 +684,11 @@ typedef struct st_join_table {
{
return (is_hash_join_key_no(key) ? hj_key : table->key_info+key);
}
- double scan_time();
- ha_rows get_examined_rows();
+ void estimate_scan_time();
+ double get_examined_rows();
bool preread_init();
- bool pfs_batch_update(JOIN *join);
+ bool pfs_batch_update();
bool is_sjm_nest() { return MY_TEST(bush_children); }
@@ -933,12 +971,49 @@ public:
/* The table that's put into join order */
JOIN_TAB *table;
+ /* number of rows that will be read from the table */
+ double records_init;
+
/*
- The "fanout": number of output rows that will be produced (after
+ Number of rows left after filtering, calculated in best_access_path().
+ If use_cond_selectivity > 1 it contains the rows left after the used
+ rowid filter (if one exists).
+ If use_cond_selectivity <= 1 it contains the minimum number of rows of
+ any rowid filtering, or records_init if no filter exists.
+ */
+ double records_after_filter;
+
+ /*
+ Number of expected rows before applying the full WHERE clause. This
+ includes rowid filter and table->cond_selectivity if
+ use_cond_selectivity > 1. See matching_candidates_in_table().
+ Should normally not be used.
+ */
+ double records_read;
+
+ /*
+ The number of rows after applying the WHERE clause.
+
+ Same as the "fanout": number of output rows that will be produced (after
pushed down selection condition is applied) per each row combination of
previous tables.
+
+ In best_access_path() it is set to the minimum number of accepted rows
+ for any possible access method or filter:
+
+ records_out takes into account table->cond_selectivity, the WHERE clause
+ related to this table calculated in calculate_cond_selectivity_for_table(),
+ and the used rowid filter.
+
+ After best_access_path(), records_out does not yet take into
+ account the part of the WHERE clause involving preceding tables.
+ records_out is updated in best_extension_by_limited_search() to take these
+ tables into account by calling table_after_join_selectivity().
*/
- double records_read;
+ double records_out;
+
+ /* Values from prev_record_reads call for EQ_REF table*/
+ double prev_record_reads, identical_keys;
/* The selectivity of the pushed down conditions */
double cond_selectivity;
@@ -946,10 +1021,13 @@ public:
/*
Cost accessing the table in course of the entire complete join execution,
i.e. cost of one access method use (e.g. 'range' or 'ref' scan ) times
- number the access method will be invoked.
+ the number of times it will be invoked, plus checking the WHERE clause.
*/
double read_time;
+ /* record combinations before this table */
+ double loops;
+
double prefix_record_count;
/* Cost for the join prefix */
@@ -986,6 +1064,7 @@ public:
LooseScan_picker loosescan_picker;
Sj_materialization_picker sjmat_picker;
+ ulonglong refills;
/*
Current optimization state: Semi-join strategy to be used for this
and preceding join tables.
@@ -1002,18 +1081,21 @@ public:
/* Type of join (EQ_REF, REF etc) */
enum join_type type;
+
/*
Valid only after fix_semijoin_strategies_for_picked_join_order() call:
if sj_strategy!=SJ_OPT_NONE, this is the number of subsequent tables that
are covered by the specified semi-join strategy
*/
uint n_sj_tables;
-
+ uint forced_index; // If force_index() is used
/*
TRUE <=> join buffering will be used. At the moment this is based on
*very* imprecise guesses made in best_access_path().
*/
bool use_join_buffer;
+ /* True if we can use join_buffer together with firstmatch */
+ bool firstmatch_with_join_buf;
POSITION();
};
@@ -1236,6 +1318,13 @@ public:
bool hash_join;
bool do_send_rows;
table_map const_table_map;
+
+ /*
+ Tables one is allowed to use in choose_plan(). Either all or
+ set to a mapt of the tables in the materialized semi-join nest
+ */
+ table_map allowed_tables;
+
/**
Bitmap of semijoin tables that the current partial plan decided
to materialize and access by lookups
@@ -1343,7 +1432,9 @@ public:
int dbug_join_tab_array_size;
#endif
- /* We also maintain a stack of join optimization states in * join->positions[] */
+ /*
+ We also maintain a stack of join optimization states in join->positions[]
+ */
/******* Join optimization state members end *******/
/*
@@ -1570,8 +1661,6 @@ public:
/* SJM nests that are executed with SJ-Materialization strategy */
List<SJ_MATERIALIZATION_INFO> sjm_info_list;
- /** TRUE <=> ref_pointer_array is set to items3. */
- bool set_group_rpa;
/** Exec time only: TRUE <=> current group has been sent */
bool group_sent;
/**
@@ -1630,9 +1719,8 @@ public:
bool build_explain();
int reinit();
int init_execution();
- void exec();
-
- void exec_inner();
+ int exec() __attribute__((warn_unused_result));
+ int exec_inner();
bool prepare_result(List<Item> **columns_list);
int destroy();
void restore_tmp();
@@ -2368,7 +2456,7 @@ inline Item * or_items(THD *thd, Item* cond, Item *item)
{
return (cond ? (new (thd->mem_root) Item_cond_or(thd, cond, item)) : item);
}
-bool choose_plan(JOIN *join, table_map join_tables);
+bool choose_plan(JOIN *join, table_map join_tables, TABLE_LIST *emb_sjm_nest);
void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
table_map last_remaining_tables,
bool first_alt, uint no_jbuf_before,
@@ -2454,14 +2542,24 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options);
bool open_tmp_table(TABLE *table);
-double prev_record_reads(const POSITION *positions, uint idx, table_map found_ref);
void fix_list_after_tbl_changes(SELECT_LEX *new_parent, List<TABLE_LIST> *tlist);
-double get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size);
-double get_tmp_table_write_cost(THD *thd, double row_count, uint row_size);
void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array);
bool sort_and_filter_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse,
bool skip_unprefixed_keyparts);
+struct TMPTABLE_COSTS
+{
+ double create;
+ double lookup;
+ double write;
+ double avg_io_cost;
+ double cache_hit_ratio;
+ double block_size;
+};
+
+TMPTABLE_COSTS get_tmp_table_costs(THD *thd, double row_count, uint row_size,
+ bool blobs_used, bool add_row_copy_cost);
+
struct st_cond_statistic
{
Item *cond;
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index d52d6071e89..10a32abe716 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -254,6 +254,8 @@ bool servers_init(bool dont_read_servers_table)
DBUG_RETURN(TRUE);
thd->thread_stack= (char*) &thd;
thd->store_globals();
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:servers_init"),
+ default_charset_info);
/*
It is safe to call servers_reload() since servers_* arrays and hashes which
will be freed there are global static objects and thus are initialized
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 320f9f4f97c..8879d5f61c1 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -50,6 +50,7 @@
#include "authors.h"
#include "contributors.h"
#include "sql_partition.h"
+#include "optimizer_defaults.h"
#ifdef HAVE_EVENT_SCHEDULER
#include "events.h"
#include "event_data_objects.h"
@@ -522,7 +523,7 @@ static struct show_privileges_st sys_privileges[]=
{"Show databases","Server Admin","To see all databases with SHOW DATABASES"},
{"Show view","Tables","To see views with SHOW CREATE VIEW"},
{"Shutdown","Server Admin", "To shut down the server"},
- {"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."},
+ {"Super","Server Admin","To set few server variables"},
{"Trigger","Tables", "To use triggers"},
{"Create tablespace", "Server Admin", "To create/alter/drop tablespaces"},
{"Update", "Tables", "To update existing rows"},
@@ -8902,6 +8903,10 @@ bool optimize_schema_tables_reads(JOIN *join)
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
+ /*
+ The following is true for the temporary table that will hold the
+ final result.
+ */
if (!tab->table || !tab->table->pos_in_table_list)
continue;
@@ -8973,6 +8978,10 @@ bool get_schema_tables_result(JOIN *join,
tab;
tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
{
+ /*
+ The following is true for the temporary table that will hold the
+ final result.
+ */
if (!tab->table || !tab->table->pos_in_table_list)
break;
@@ -9203,6 +9212,49 @@ int fill_key_cache_tables(THD *thd, TABLE_LIST *tables, COND *cond)
}
+/* Ensure we return 'OPTIMIZER_COST_UNDEF' if cost < 0 */
+
+static double fix_cost(double cost)
+{
+ return cost < 0 ? OPTIMIZER_COST_UNDEF : cost;
+}
+
+static int run_fill_optimizer_costs_tables(const LEX_CSTRING *name,
+ const OPTIMIZER_COSTS *costs,
+ TABLE *table)
+{
+ THD *thd= table->in_use;
+ DBUG_ENTER("run_fill_optimizer_costs_tables");
+
+ restore_record(table, s->default_values);
+ table->field[0]->store(name->str, name->length, system_charset_info);
+ table->field[1]->store(fix_cost(costs->disk_read_cost*1000.0));
+ table->field[2]->store(fix_cost(costs->index_block_copy_cost*1000.0));
+ table->field[3]->store(fix_cost(costs->key_cmp_cost*1000.0));
+ table->field[4]->store(fix_cost(costs->key_copy_cost*1000.0));
+ table->field[5]->store(fix_cost(costs->key_lookup_cost*1000.0));
+ table->field[6]->store(fix_cost(costs->key_next_find_cost*1000.0));
+ table->field[7]->store(fix_cost(costs->disk_read_ratio));
+ table->field[8]->store(fix_cost(costs->row_copy_cost*1000.0));
+ table->field[9]->store(fix_cost(costs->row_lookup_cost*1000.0));
+ table->field[10]->store(fix_cost(costs->row_next_find_cost*1000.0));
+ table->field[11]->store(fix_cost(costs->rowid_cmp_cost*1000.0));
+ table->field[12]->store(fix_cost(costs->rowid_copy_cost*1000.0));
+
+ DBUG_RETURN(schema_table_store_record(thd, table));
+}
+
+
+int fill_optimizer_costs_tables(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ DBUG_ENTER("fill_optimizer_costs_tables");
+
+ int res= process_optimizer_costs(run_fill_optimizer_costs_tables,
+ tables->table);
+ DBUG_RETURN(res);
+}
+
+
namespace Show {
ST_FIELD_INFO schema_fields_info[]=
@@ -9831,6 +9883,25 @@ ST_FIELD_INFO keycache_fields_info[]=
};
+ST_FIELD_INFO optimizer_costs_fields_info[]=
+{
+ Column("ENGINE", Varchar(NAME_LEN),NOT_NULL),
+ Column("OPTIMIZER_DISK_READ_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_INDEX_BLOCK_COPY_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_KEY_COMPARE_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_KEY_COPY_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_KEY_LOOKUP_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_KEY_NEXT_FIND_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_DISK_READ_RATIO", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_ROW_COPY_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_ROW_LOOKUP_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_ROW_NEXT_FIND_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_ROWID_COMPARE_COST", Decimal(906), NOT_NULL),
+ Column("OPTIMIZER_ROWID_COPY_COST", Decimal(906), NOT_NULL),
+ CEnd()
+};
+
+
ST_FIELD_INFO show_explain_tabular_fields_info[]=
{
Column("id", SLonglong(3), NULLABLE, "id"),
@@ -9969,6 +10040,8 @@ ST_SCHEMA_TABLE schema_tables[]=
OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"OPEN_TABLES", Show::open_tables_fields_info, 0,
fill_open_tables, make_old_format, 0, -1, -1, 1, 0},
+ {"OPTIMIZER_COSTS", Show::optimizer_costs_fields_info, 0,
+ fill_optimizer_costs_tables, 0, 0, -1,-1, 0, 0},
{"OPTIMIZER_TRACE", Show::optimizer_trace_info, 0,
fill_optimizer_trace_info, NULL, NULL, -1, -1, false, 0},
{"PARAMETERS", Show::parameters_fields_info, 0,
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index 6c9a81a32c9..88f719b3593 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -541,14 +541,25 @@ to be fixed later
class Sort_param {
public:
- uint rec_length; // Length of sorted records.
- uint sort_length; // Length of sorted columns.
+ // Length of sorted records. ALWAYS equal to sort_length + addon_length
+ uint rec_length;
+ /*
+ Length of what we need to sort: sorted columns + ref_length if no
+ addon fields are used
+ */
+ uint sort_length;
+ /* Length of the reference to the row (rowid, primary key, etc.) */
uint ref_length; // Length of record ref.
+ /* Length of all addon fields. 0 if no addon fields */
uint addon_length; // Length of addon_fields
- uint res_length; // Length of records in final sorted file/buffer.
+ /*
+ The length of the 'result' we are going to return to the caller for
+ each sort element. Also the length of data in final sorted file/buffer.
+ */
+ uint res_length;
uint max_keys_per_buffer; // Max keys / buffer.
uint min_dupl_count;
- ha_rows max_rows; // Select limit, or HA_POS_ERROR if unlimited.
+ ha_rows limit_rows; // Select limit, or HA_POS_ERROR if unlimited.
ha_rows examined_rows; // Number of examined rows.
TABLE *sort_form; // For quicker make_sortkey.
/**
@@ -579,10 +590,14 @@ public:
*/
tmp_buffer.set_charset(&my_charset_bin);
}
- void init_for_filesort(uint sortlen, TABLE *table,
- ha_rows maxrows, Filesort *filesort);
- void (*unpack)(TABLE *);
+ void init_for_filesort(TABLE *table, Filesort *filesort,
+ uint sortlen, ha_rows limit_rows_arg);
+ void setup_lengths_and_limit(TABLE *table,
+ uint sortlen,
+ uint addon_length,
+ ha_rows limit_rows_arg);
+ void (*unpack)(TABLE *);
/// Enables the packing of addons if possible.
void try_to_pack_addons(ulong max_length_for_sort_data);
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 567dee84a3f..ecde847c8d4 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -3741,10 +3741,15 @@ void set_statistics_for_table(THD *thd, TABLE *table)
{
TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb;
Table_statistics *read_stats= stats_cb->table_stats;
- table->used_stat_records=
+
+ /*
+ The MAX below is to ensure that we don't return 0 rows for a table if it
+ is not guaranteed to be empty.
+ */
+ table->used_stat_records=
(!check_eits_preferred(thd) ||
!table->stats_is_read || read_stats->cardinality_is_null) ?
- table->file->stats.records : read_stats->cardinality;
+ table->file->stats.records : MY_MAX(read_stats->cardinality, 1);
/*
For partitioned table, EITS statistics is based on data from all partitions.
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index d4639fb6bc4..14db5ad7c7d 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -1150,7 +1150,6 @@ String_copier::well_formed_copy(CHARSET_INFO *to_cs,
}
-
/*
Append characters to a single-quoted string '...', escaping special
characters with backslashes as necessary.
@@ -1168,6 +1167,8 @@ bool String::append_for_single_quote(const char *st, size_t len)
case '\\': APPEND(STRING_WITH_LEN("\\\\"));
case '\0': APPEND(STRING_WITH_LEN("\\0"));
case '\'': APPEND(STRING_WITH_LEN("\\'"));
+ case '\b': APPEND(STRING_WITH_LEN("\\b"));
+ case '\t': APPEND(STRING_WITH_LEN("\\t"));
case '\n': APPEND(STRING_WITH_LEN("\\n"));
case '\r': APPEND(STRING_WITH_LEN("\\r"));
case '\032': APPEND(STRING_WITH_LEN("\\Z"));
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index bc9856b4310..26a58e66ffe 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2105,7 +2105,8 @@ static int sort_keys(KEY *a, KEY *b)
return -1;
/*
Long Unique keys should always be last unique key.
- Before this patch they used to change order wrt to partial keys (MDEV-19049)
+ Before this patch they used to change order with respect to partial keys
+ (MDEV-19049)
*/
if (a->algorithm == HA_KEY_ALG_LONG_HASH)
return 1;
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index b85b37b1726..6c2bbedef6d 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -383,7 +383,7 @@ void print_sjm(SJ_MATERIALIZATION_INFO *sjm)
}
fprintf(DBUG_FILE, " }\n");
fprintf(DBUG_FILE, " materialize_cost= %g\n",
- sjm->materialization_cost.total_cost());
+ sjm->materialization_cost);
fprintf(DBUG_FILE, " rows= %g\n", sjm->rows);
fprintf(DBUG_FILE, "}\n");
DBUG_UNLOCK_FILE;
@@ -698,14 +698,15 @@ void print_keyuse_array_for_trace(THD *thd, DYNAMIC_ARRAY *keyuse_array)
{
keyuse_elem.add("index", keyuse->table->key_info[keyuse->key].name);
}
- keyuse_elem.add("field", (keyuse->keypart == FT_KEYPART) ? "<fulltext>":
- (keyuse->is_for_hash_join() ?
- keyuse->table->field[keyuse->keypart]
- ->field_name.str :
- keyuse->table->key_info[keyuse->key]
- .key_part[keyuse->keypart]
- .field->field_name.str));
- keyuse_elem.add("equals",keyuse->val);
- keyuse_elem.add("null_rejecting",keyuse->null_rejecting);
+ keyuse_elem.
+ add("field", (keyuse->keypart == FT_KEYPART) ? "<fulltext>":
+ (keyuse->is_for_hash_join() ?
+ keyuse->table->field[keyuse->keypart]
+ ->field_name.str :
+ keyuse->table->key_info[keyuse->key]
+ .key_part[keyuse->keypart]
+ .field->field_name.str)).
+ add("equals",keyuse->val).
+ add("null_rejecting",keyuse->null_rejecting);
}
}
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index 066d6b7483d..b4a7a0d5091 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -705,11 +705,11 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl,
goto err;
wrapper_sl->select_number= ++thd->lex->stmt_lex->current_select_number;
wrapper_sl->parent_lex= lex; /* Used in init_query. */
- wrapper_sl->init_query();
- wrapper_sl->init_select();
+ wrapper_sl->make_empty_select();
wrapper_sl->nest_level= tvc_sl->nest_level;
wrapper_sl->parsing_place= tvc_sl->parsing_place;
+ wrapper_sl->distinct= tvc_sl->distinct;
wrapper_sl->set_linkage(tvc_sl->get_linkage());
wrapper_sl->exclude_from_table_unique_test=
tvc_sl->exclude_from_table_unique_test;
@@ -737,6 +737,7 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl,
derived_unit->init_query();
derived_unit->thd= thd;
derived_unit->include_down(wrapper_sl);
+ derived_unit->distinct= tvc_sl->distinct;
/*
Attach the select used of TVC as the only slave to the unit for
@@ -953,8 +954,10 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
if (!length || length > tmp_table_max_key_length() ||
args[0]->cols() > tmp_table_max_key_parts())
{
- trace_conv.add("done", false);
- trace_conv.add("reason", "key is too long");
+ if (unlikely(trace_conv.trace_started()))
+ trace_conv.
+ add("done", false).
+ add("reason", "key is too long");
return this;
}
@@ -962,15 +965,19 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
{
if (!args[i]->const_item())
{
- trace_conv.add("done", false);
- trace_conv.add("reason", "non-constant element in the IN-list");
+ if (unlikely(trace_conv.trace_started()))
+ trace_conv.
+ add("done", false).
+ add("reason", "non-constant element in the IN-list");
return this;
}
if (cmp_row_types(args[i], args[0]))
{
- trace_conv.add("done", false);
- trace_conv.add("reason", "type mismatch");
+ if (unlikely(trace_conv.trace_started()))
+ trace_conv.
+ add("done", false).
+ add("reason", "type mismatch");
return this;
}
}
@@ -1005,7 +1012,9 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
lex->init_select();
tvc_select= lex->current_select;
derived_unit= tvc_select->master_unit();
+ derived_unit->distinct= 1;
tvc_select->set_linkage(DERIVED_TABLE_TYPE);
+ tvc_select->distinct= 1;
/* Create TVC used in the transformation */
if (create_value_list_for_tvc(thd, &values))
@@ -1038,7 +1047,9 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
sq_select->add_where_field(derived_unit->first_select());
sq_select->context.table_list= sq_select->table_list.first;
sq_select->context.first_name_resolution_table= sq_select->table_list.first;
- sq_select->table_list.first->derived_type= DTYPE_TABLE | DTYPE_MATERIALIZE;
+ sq_select->table_list.first->derived_type= (DTYPE_TABLE |
+ DTYPE_MATERIALIZE |
+ DTYPE_IN_PREDICATE);
lex->derived_tables|= DERIVED_SUBQUERY;
sq_select->where= 0;
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 02f068e9bbc..785ba2adccc 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -181,6 +181,8 @@ void udf_init()
initialized = 1;
new_thd->thread_stack= (char*) &new_thd;
new_thd->store_globals();
+ new_thd->set_query_inner((char*) STRING_WITH_LEN("intern:udf_init"),
+ default_charset_info);
new_thd->set_db(&MYSQL_SCHEMA_NAME);
tables.init_one_table(&new_thd->db, &MYSQL_FUNC_NAME, 0, TL_READ);
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 6f96069e46a..c1f28baeb25 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -355,8 +355,6 @@ select_unit::create_result_table(THD *thd_arg, List<Item> *column_types,
return TRUE;
table->keys_in_use_for_query.clear_all();
- for (uint i=0; i < table->s->fields; i++)
- table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
if (create_table)
{
@@ -395,9 +393,6 @@ select_union_recursive::create_result_table(THD *thd_arg,
return true;
incr_table->keys_in_use_for_query.clear_all();
- for (uint i=0; i < table->s->fields; i++)
- incr_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
-
return false;
}
@@ -1304,6 +1299,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
bool instantiate_tmp_table= false;
bool single_tvc= !first_sl->next_select() && first_sl->tvc;
bool single_tvc_wo_order= single_tvc && !first_sl->order_list.elements;
+ bool distinct_key= 0;
DBUG_ENTER("st_select_lex_unit::prepare");
DBUG_ASSERT(thd == current_thd);
@@ -1407,15 +1403,17 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
{
case INTERSECT_TYPE:
have_intersect= TRUE;
- if (!s->distinct){
- have_except_all_or_intersect_all= true;
- }
+ if (!s->distinct)
+ have_except_all_or_intersect_all= TRUE;
break;
case EXCEPT_TYPE:
have_except= TRUE;
- if (!s->distinct){
+ if (!s->distinct)
have_except_all_or_intersect_all= TRUE;
- }
+ break;
+ case DERIVED_TABLE_TYPE:
+ if (s->distinct)
+ distinct_key= 1;
break;
default:
break;
@@ -1622,7 +1620,8 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
if (join_union_item_types(thd, types, union_part_count + 1))
goto err;
if (union_result->create_result_table(thd, &types,
- MY_TEST(union_distinct),
+ (MY_TEST(union_distinct) ||
+ distinct_key),
create_options,
&derived_arg->alias, false,
instantiate_tmp_table, false,
@@ -1645,7 +1644,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
res= derived_arg->derived_result->create_result_table(thd,
&types,
- FALSE,
+ distinct_key,
create_options,
&derived_arg->alias,
FALSE, FALSE,
@@ -1769,9 +1768,9 @@ cont:
union_result->create_result_table(thd, &types,
MY_TEST(union_distinct) ||
have_except_all_or_intersect_all ||
- have_intersect,
- create_options, &empty_clex_str, false,
- instantiate_tmp_table, false,
+ have_intersect || distinct_key,
+ create_options, &empty_clex_str,
+ false, instantiate_tmp_table, false,
hidden);
union_result->addon_cnt= hidden;
for (uint i= 0; i < hidden; i++)
@@ -2158,8 +2157,8 @@ bool st_select_lex_unit::exec()
ulonglong add_rows=0;
ha_rows examined_rows= 0;
bool first_execution= !executed;
- DBUG_ENTER("st_select_lex_unit::exec");
bool was_executed= executed;
+ DBUG_ENTER("st_select_lex_unit::exec");
if (executed && !uncacheable && !describe)
DBUG_RETURN(FALSE);
@@ -2246,7 +2245,7 @@ bool st_select_lex_unit::exec()
if (sl->tvc)
sl->tvc->exec(sl);
else
- sl->join->exec();
+ saved_error= sl->join->exec();
if (sl == union_distinct && !have_except_all_or_intersect_all &&
!(with_element && with_element->is_recursive))
{
@@ -2256,8 +2255,6 @@ bool st_select_lex_unit::exec()
DBUG_RETURN(TRUE);
table->no_keyread=1;
}
- if (!sl->tvc)
- saved_error= sl->join->error;
if (likely(!saved_error))
{
examined_rows+= thd->get_examined_row_count();
@@ -2404,7 +2401,8 @@ bool st_select_lex_unit::exec()
{
join->join_examined_rows= 0;
saved_error= join->reinit();
- join->exec();
+ if (join->exec())
+ saved_error= 1;
}
}
@@ -2510,8 +2508,7 @@ bool st_select_lex_unit::exec_recursive()
sl->tvc->exec(sl);
else
{
- sl->join->exec();
- saved_error= sl->join->error;
+ saved_error= sl->join->exec();
}
if (likely(!saved_error))
{
@@ -2523,11 +2520,10 @@ bool st_select_lex_unit::exec_recursive()
DBUG_RETURN(1);
}
}
- if (unlikely(saved_error))
+ else
{
thd->lex->current_select= lex_select_save;
goto err;
-
}
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index f56ec5c83c9..4173d6d82ec 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -581,8 +581,10 @@ int mysql_update(THD *thd,
set_statistics_for_table(thd, table);
select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
- if (unlikely(error || !limit || thd->is_error() ||
- (select && select->check_quick(thd, safe_update, limit))))
+ if (unlikely(error || thd->is_error() || !limit ||
+ (select && select->check_quick(thd, safe_update, limit)) ||
+ table->stat_records() == 0))
+
{
query_plan.set_impossible_where();
if (thd->lex->describe || thd->lex->analyze_stmt)
@@ -833,15 +835,15 @@ int mysql_update(THD *thd,
table->use_all_columns();
/*
- We are doing a search on a key that is updated. In this case
- we go trough the matching rows, save a pointer to them and
- update these in a separate loop based on the pointer.
+ We are doing a search on a key that is updated. In this case
+ we go through the matching rows, save a pointer to them and
+ update these in a separate loop based on the pointer.
*/
explain->buf_tracker.on_scan_init();
IO_CACHE tempfile;
if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
- DISK_BUFFER_SIZE, MYF(MY_WME)))
- goto err;
+ DISK_CHUNK_SIZE, MYF(MY_WME)))
+ goto err;
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
@@ -2271,6 +2273,7 @@ static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
case JT_REF:
case JT_REF_OR_NULL:
return !is_key_used(table, join_tab->ref.key, table->write_set);
+ case JT_RANGE:
case JT_ALL:
if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
return FALSE;
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 30fba1bcf98..89251e33f7f 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -67,8 +67,8 @@ static void make_unique_view_field_name(THD *thd, Item *target,
List<Item> &item_list,
Item *last_element)
{
- const char *name= (target->orig_name ?
- target->orig_name :
+ const char *name= (target->orig_name.str ?
+ target->orig_name.str :
target->name.str);
size_t name_len;
uint attempt;
@@ -100,8 +100,8 @@ static void make_unique_view_field_name(THD *thd, Item *target,
itc.rewind();
}
- if (!target->orig_name)
- target->orig_name= target->name.str;
+ if (!target->orig_name.str)
+ target->orig_name= target->name;
target->set_name(thd, buff, name_len, system_charset_info);
}
@@ -186,7 +186,7 @@ void make_valid_column_names(THD *thd, List<Item> &item_list)
if (item->is_explicit_name() || !check_column_name(item->name.str))
continue;
name_len= my_snprintf(buff, NAME_LEN, "Name_exp_%u", column_no);
- item->orig_name= item->name.str;
+ item->orig_name= item->name;
item->set_name(thd, buff, name_len, system_charset_info);
}
diff --git a/sql/sql_window.cc b/sql/sql_window.cc
index 8716b596914..436a8db948d 100644
--- a/sql/sql_window.cc
+++ b/sql/sql_window.cc
@@ -423,6 +423,16 @@ ORDER *st_select_lex::find_common_window_func_partition_fields(THD *thd)
#define CMP_GT_C 1 // Greater than and compatible
#define CMP_GT 2 // Greater then
+
+/*
+ This function is used for sorting ORDER/PARTITION BY clauses of window
+ functions and so must implement an order relation on ORDER BY clauses.
+
+ It is called by a sorting function.
+ The function returns CMP_EQ (=0) if the values are identical.
+ If not equal, it returns a stable value less than or greater than 0.
+*/
+
static
int compare_order_elements(ORDER *ord1, int weight1,
ORDER *ord2, int weight2)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 0fed9186dd2..2927b6ce8de 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -8277,7 +8277,7 @@ assign_to_keycache_parts:
key_cache_name:
ident { $$= $1; }
- | DEFAULT { $$ = default_key_cache_base; }
+ | DEFAULT { $$ = default_base; }
;
preload:
@@ -12502,13 +12502,8 @@ opt_procedure_or_into:
}
| into opt_select_lock_type
{
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_WARN_DEPRECATED_SYNTAX,
- ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX),
- "<select expression> INTO <destination>;",
- "'SELECT <select list> INTO <destination>"
- " FROM...'");
$$= $2;
+ status_var_increment(thd->status_var.feature_into_outfile);
}
;
@@ -12731,6 +12726,7 @@ into_destination:
new (thd->mem_root)
select_export(thd, lex->exchange))))
MYSQL_YYABORT;
+ status_var_increment(thd->status_var.feature_into_outfile);
}
opt_load_data_charset
{ Lex->exchange->cs= $4; }
@@ -12753,6 +12749,7 @@ into_destination:
| select_var_list_init
{
Lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
+ status_var_increment(thd->status_var.feature_into_variable);
}
;
diff --git a/sql/structs.h b/sql/structs.h
index 0a71719376c..214fcb242ff 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -96,12 +96,22 @@ class engine_option_value;
struct ha_index_option_struct;
typedef struct st_key {
- uint key_length; /* total length of user defined key parts */
- ulong flags; /* dupp key and pack flags */
+ ulong flags; /* dupp key and pack flags */
+ ulong ext_key_flags; /* Flags for extended key */
+ ulong index_flags; /* Copy of handler->index_flags(index_number, 0, 1) */
+ uint key_length; /* total length of user defined key parts */
uint user_defined_key_parts; /* How many key_parts */
uint usable_key_parts; /* Should normally be = user_defined_key_parts */
- uint ext_key_parts; /* Number of key parts in extended key */
- ulong ext_key_flags; /* Flags for extended key */
+ uint ext_key_parts; /* Number of key parts in extended key */
+ uint block_size;
+ /*
+ The flag is on if statistical data for the index prefixes
+ has to be taken from the system statistical tables.
+ */
+ bool is_statistics_from_stat_tables;
+ bool without_overlaps;
+ bool is_ignored; // TRUE if index needs to be ignored
+
/*
Parts of primary key that are in the extension of this index.
@@ -123,13 +133,7 @@ typedef struct st_key {
/* Set of keys constraint correlated with this key */
key_map constraint_correlated;
LEX_CSTRING name;
- uint block_size;
enum ha_key_alg algorithm;
- /*
- The flag is on if statistical data for the index prefixes
- has to be taken from the system statistical tables.
- */
- bool is_statistics_from_stat_tables;
/*
Note that parser is used when the table is opened for use, and
parser_name is used when the table is being created.
@@ -167,12 +171,6 @@ typedef struct st_key {
ha_index_option_struct *option_struct; /* structure with parsed options */
double actual_rec_per_key(uint i);
-
- bool without_overlaps;
- /*
- TRUE if index needs to be ignored
- */
- bool is_ignored;
} KEY;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 1ed3d61bcf8..28443c8f4f7 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -53,8 +53,9 @@
#include "debug_sync.h" // DEBUG_SYNC
#include "sql_show.h"
#include "opt_trace_context.h"
-
#include "log_event.h"
+#include "optimizer_defaults.h"
+
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
#include "../storage/perfschema/pfs_server.h"
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
@@ -723,13 +724,24 @@ Sys_binlog_direct(
CMD_LINE(OPT_ARG), DEFAULT(FALSE),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(binlog_direct_check));
+static bool deprecated_explicit_defaults_for_timestamp(sys_var *self, THD *thd,
+ set_var *var)
+{
+ if (var->value && var->save_result.ulonglong_value == 0)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT),
+ "explicit_defaults_for_timestamp=0"); // since 11.0.0
+ return false;
+}
static Sys_var_bit Sys_explicit_defaults_for_timestamp(
"explicit_defaults_for_timestamp",
"This option causes CREATE TABLE to create all TIMESTAMP columns "
"as NULL with DEFAULT NULL attribute, Without this option, "
"TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.",
SESSION_VAR(option_bits), CMD_LINE(OPT_ARG),
- OPTION_EXPLICIT_DEF_TIMESTAMP, DEFAULT(TRUE), NO_MUTEX_GUARD, IN_BINLOG);
+ OPTION_EXPLICIT_DEF_TIMESTAMP, DEFAULT(TRUE), NO_MUTEX_GUARD, IN_BINLOG,
+ ON_CHECK(deprecated_explicit_defaults_for_timestamp));
static Sys_var_ulonglong Sys_bulk_insert_buff_size(
"bulk_insert_buffer_size", "Size of tree cache used in bulk "
@@ -1511,9 +1523,12 @@ static Sys_var_bit Sys_log_slow_admin_statements(
"log_slow_admin_statements",
"Log slow OPTIMIZE, ANALYZE, ALTER and other administrative statements "
"to the slow log if it is open. Resets or sets the option 'admin' in "
- "log_slow_disabled_statements",
+ "log_slow_filter. "
+ "Deprecated, use log_slow_filter without 'admin'.",
SESSION_VAR(log_slow_disabled_statements),
- CMD_LINE(OPT_ARG), REVERSE(LOG_SLOW_DISABLE_ADMIN), DEFAULT(TRUE));
+ CMD_LINE(OPT_ARG), REVERSE(LOG_SLOW_DISABLE_ADMIN), DEFAULT(TRUE),
+ 0, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(0),
+ DEPRECATED("'@@log_slow_filter'"));
static Sys_var_bit Sys_log_slow_slave_statements(
"log_slow_slave_statements",
@@ -6666,7 +6681,7 @@ static Sys_var_enum Sys_histogram_type(
"DOUBLE_PREC_HB - double precision height-balanced, "
"JSON_HB - height-balanced, stored as JSON.",
SESSION_VAR(histogram_type), CMD_LINE(REQUIRED_ARG),
- histogram_types, DEFAULT(1));
+ histogram_types, DEFAULT(2));
static Sys_var_mybool Sys_no_thread_alarm(
"debug_no_thread_alarm",
@@ -6932,20 +6947,6 @@ static Sys_var_mybool Sys_session_track_user_variables(
#endif //EMBEDDED_LIBRARY
-static Sys_var_uint Sys_in_subquery_conversion_threshold(
- "in_predicate_conversion_threshold",
- "The minimum number of scalar elements in the value list of "
- "IN predicate that triggers its conversion to IN subquery. Set to "
- "0 to disable the conversion.",
- SESSION_VAR(in_subquery_conversion_threshold), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, UINT_MAX), DEFAULT(IN_SUBQUERY_CONVERSION_THRESHOLD), BLOCK_SIZE(1));
-
-static Sys_var_ulong Sys_optimizer_max_sel_arg_weight(
- "optimizer_max_sel_arg_weight",
- "The maximum weight of the SEL_ARG graph. Set to 0 for no limit",
- SESSION_VAR(optimizer_max_sel_arg_weight), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, ULONG_MAX), DEFAULT(SEL_ARG::MAX_WEIGHT), BLOCK_SIZE(1));
-
static Sys_var_enum Sys_secure_timestamp(
"secure_timestamp", "Restricts direct setting of a session "
"timestamp. Possible levels are: YES - timestamp cannot deviate from "
@@ -6970,3 +6971,129 @@ static Sys_var_bit Sys_system_versioning_insert_history(
SESSION_VAR(option_bits), CMD_LINE(OPT_ARG),
OPTION_INSERT_HISTORY, DEFAULT(FALSE),
NO_MUTEX_GUARD, IN_BINLOG);
+
+/* Optimizer variables */
+
+static Sys_var_uint Sys_in_subquery_conversion_threshold(
+ "in_predicate_conversion_threshold",
+ "The minimum number of scalar elements in the value list of "
+ "IN predicate that triggers its conversion to IN subquery. Set to "
+ "0 to disable the conversion.",
+ SESSION_VAR(in_subquery_conversion_threshold), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(IN_SUBQUERY_CONVERSION_THRESHOLD),
+ BLOCK_SIZE(1));
+
+static Sys_var_ulong Sys_optimizer_max_sel_arg_weight(
+ "optimizer_max_sel_arg_weight",
+ "The maximum weight of the SEL_ARG graph. Set to 0 for no limit",
+ SESSION_VAR(optimizer_max_sel_arg_weight), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, ULONG_MAX), DEFAULT(SEL_ARG::MAX_WEIGHT), BLOCK_SIZE(1));
+
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_disk_read_ratio(
+ "optimizer_disk_read_ratio",
+ "Chance that we have to do a disk read to find a row or index entry from "
+ "the engine cache (cache_misses/total_cache_requests). 0.0 means that "
+ "everything is cached and 1.0 means that nothing is expected to be in the "
+ "engine cache.",
+ COST_VAR(disk_read_ratio),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_DISK_READ_RATIO),
+ VALID_RANGE(0.0, 1.0), DEFAULT(DEFAULT_DISK_READ_RATIO), COST_ADJUST(1));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_key_lookup_cost(
+ "optimizer_key_lookup_cost",
+ "Cost for finding a key based on a key value",
+ COST_VAR(key_lookup_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_KEY_LOOKUP_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_KEY_LOOKUP_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_row_lookup_cost(
+ "optimizer_row_lookup_cost",
+ "Cost of finding a row based on a rowid or a clustered key.",
+ COST_VAR(row_lookup_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_ROW_LOOKUP_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_ROW_LOOKUP_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_disk_read_cost(
+ "optimizer_disk_read_cost",
+ "Cost of reading a block of IO_SIZE (4096) from a disk (in usec).",
+ COST_VAR(disk_read_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_DISK_READ_COST),
+ VALID_RANGE(0, 10000), DEFAULT(DEFAULT_DISK_READ_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_key_copy_cost(
+ "optimizer_key_copy_cost",
+ "Cost of finding the next key in the engine and copying it to the SQL "
+ "layer.",
+ COST_VAR(key_copy_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_KEY_COPY_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_KEY_COPY_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_index_block_copy_cost(
+ "optimizer_index_block_copy_cost",
+ "Cost of copying a key block from the cache to intern storage as part of "
+ "an index scan.",
+ COST_VAR(index_block_copy_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_INDEX_BLOCK_COPY_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_INDEX_BLOCK_COPY_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_row_next_find_cost(
+ "optimizer_row_next_find_cost",
+ "Cost of finding the next row when scanning the table.",
+ COST_VAR(row_next_find_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_ROW_NEXT_FIND_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_ROW_NEXT_FIND_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_key_next_find_cost(
+ "optimizer_key_next_find_cost",
+ "Cost of finding the next key and rowid when using filters.",
+ COST_VAR(key_next_find_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_KEY_NEXT_FIND_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_KEY_NEXT_FIND_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_row_copy_cost(
+ "optimizer_row_copy_cost",
+ "Cost of copying a row from the engine or the join cache to the SQL layer.",
+ COST_VAR(row_copy_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_ROW_COPY_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_ROW_COPY_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_key_cmp_cost(
+ "optimizer_key_compare_cost",
+ "Cost of checking a key against the end key condition.",
+ COST_VAR(key_cmp_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_KEY_CMP_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_KEY_COMPARE_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_rowid_cmp_cost(
+ "optimizer_rowid_compare_cost",
+ "Cost of comparing two rowid's",
+ COST_VAR(rowid_cmp_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_ROWID_CMP_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_ROWID_COMPARE_COST), COST_ADJUST(1000));
+
+static Sys_var_engine_optimizer_cost Sys_optimizer_rowid_copy_cost(
+ "optimizer_rowid_copy_cost",
+ "Cost of copying a rowid",
+ COST_VAR(rowid_copy_cost),
+ CMD_LINE(REQUIRED_ARG, OPT_COSTS_ROWID_COPY_COST),
+ VALID_RANGE(0, 1000), DEFAULT(DEFAULT_ROWID_COPY_COST), COST_ADJUST(1000));
+
+/* The following costs are stored in THD and handler */
+
+static Sys_var_optimizer_cost Sys_optimizer_where_cost(
+ "optimizer_where_cost",
+ "Cost of checking the row against the WHERE clause. Increasing this will "
+ "have the optimizer to prefer plans with less row combinations.",
+ SESSION_VAR(optimizer_where_cost),
+ CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 100000), DEFAULT(DEFAULT_WHERE_COST), COST_ADJUST(1000));
+
+static Sys_var_optimizer_cost Sys_optimizer_scan_cost(
+ "optimizer_scan_setup_cost",
+ "Extra cost added to TABLE and INDEX scans to get optimizer to prefer "
+ "index lookups.",
+ SESSION_VAR(optimizer_scan_setup_cost),
+ CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, 100000000), DEFAULT(DEFAULT_TABLE_SCAN_SETUP_COST),
+ COST_ADJUST(1000));
diff --git a/sql/sys_vars.inl b/sql/sys_vars.inl
index b1d7bc31255..2c5acdcdc6b 100644
--- a/sql/sys_vars.inl
+++ b/sql/sys_vars.inl
@@ -32,6 +32,7 @@
#include "rpl_mi.h" // For Multi-Source Replication
#include "debug_sync.h"
#include "sql_acl.h" // check_global_access()
+#include "optimizer_defaults.h" // create_optimizer_costs
/*
a set of mostly trivial (as in f(X)=X) defines below to make system variable
@@ -40,6 +41,7 @@
#define VALID_RANGE(X,Y) X,Y
#define DEFAULT(X) X
#define BLOCK_SIZE(X) X
+#define COST_ADJUST(X) X
#define GLOBAL_VAR(X) sys_var::GLOBAL, (((char*)&(X))-(char*)&global_system_variables), sizeof(X)
#define SESSION_VAR(X) sys_var::SESSION, offsetof(SV, X), sizeof(((SV *)0)->X)
#define SESSION_ONLY(X) sys_var::ONLY_SESSION, offsetof(SV, X), sizeof(((SV *)0)->X)
@@ -1048,7 +1050,7 @@ public:
/* If no basename, assume it's for the key cache named 'default' */
if (!base_name->length)
- base_name= &default_key_cache_base;
+ base_name= &default_base;
key_cache= get_key_cache(base_name);
@@ -1198,7 +1200,6 @@ public:
option.var_type|= GET_DOUBLE;
option.min_value= (longlong) getopt_double2ulonglong(min_val);
option.max_value= (longlong) getopt_double2ulonglong(max_val);
- global_var(double)= (double)option.def_value;
SYSVAR_ASSERT(min_val < max_val);
SYSVAR_ASSERT(min_val <= def_val);
SYSVAR_ASSERT(max_val >= def_val);
@@ -1228,6 +1229,139 @@ public:
{ var->save_result.double_value= getopt_ulonglong2double(option.def_value); }
};
+
+/*
+ Optimizer costs
+ Stored as cost factor (1 cost = 1 ms).
+ Given and displayed as microseconds (as most values are very small).
+*/
+
+class Sys_var_optimizer_cost: public Sys_var_double
+{
+public:
+ double cost_adjust;
+ Sys_var_optimizer_cost(const char *name_arg,
+ const char *comment, int flag_args, ptrdiff_t off, size_t size,
+ CMD_LINE getopt,
+ double min_val, double max_val, double def_val,
+ ulong arg_cost_adjust, PolyLock *lock=0,
+ enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG,
+ on_check_function on_check_func=0,
+ on_update_function on_update_func=0,
+ const char *substitute=0)
+ :Sys_var_double(name_arg, comment, flag_args, off, size, getopt,
+ min_val, max_val, def_val * arg_cost_adjust, lock,
+ binlog_status_arg,
+ on_check_func,
+ on_update_func,
+ substitute)
+ {
+ cost_adjust= (double) arg_cost_adjust;
+ }
+ bool session_update(THD *thd, set_var *var)
+ {
+ session_var(thd, double)= var->save_result.double_value/cost_adjust;
+ return false;
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ global_var(double)= var->save_result.double_value/cost_adjust;
+ return false;
+ }
+ void session_save_default(THD *thd, set_var *var)
+ { var->save_result.double_value= global_var(double) * cost_adjust; }
+
+ void global_save_default(THD *thd, set_var *var)
+ {
+ var->save_result.double_value= getopt_ulonglong2double(option.def_value);
+ }
+ const uchar *tmp_ptr(THD *thd) const
+ {
+ if (thd->sys_var_tmp.double_value > 0)
+ thd->sys_var_tmp.double_value*= cost_adjust;
+ return (uchar*) &thd->sys_var_tmp.double_value;
+ }
+ const uchar *session_value_ptr(THD *thd, const LEX_CSTRING *base) const
+ {
+ thd->sys_var_tmp.double_value= session_var(thd, double);
+ return tmp_ptr(thd);
+ }
+ const uchar *global_value_ptr(THD *thd, const LEX_CSTRING *base) const
+ {
+ thd->sys_var_tmp.double_value= global_var(double);
+ return tmp_ptr(thd);
+ }
+};
+
+
+/*
+ The class for optimizer costs with structured names, unique for each engine.
+ Used as 'engine.variable_name'
+
+ Class specific constructor arguments:
+ everything derived from Sys_var_optimizer_cost
+
+ Backing store: double
+
+ @note these variables can be only GLOBAL
+*/
+
+#define COST_VAR(X) GLOBAL_VAR(default_optimizer_costs.X)
+#define cost_var_ptr(KC, OFF) (((uchar*)(KC))+(OFF))
+#define cost_var(KC, OFF) (*(double*)cost_var_ptr(KC, OFF))
+
+class Sys_var_engine_optimizer_cost: public Sys_var_optimizer_cost
+{
+ public:
+ Sys_var_engine_optimizer_cost(const char *name_arg,
+ const char *comment, int flag_args, ptrdiff_t off, size_t size,
+ CMD_LINE getopt,
+ double min_val, double max_val, double def_val,
+ long cost_adjust, PolyLock *lock= 0,
+ const char *substitute=0)
+ : Sys_var_optimizer_cost(name_arg, comment, flag_args, off, size,
+ getopt, min_val, max_val, def_val, cost_adjust,
+ lock, VARIABLE_NOT_IN_BINLOG, 0,
+ 0, substitute)
+ {
+ option.var_type|= GET_ASK_ADDR;
+ option.value= (uchar**)1; // crash me, please
+ // fix an offset from global_system_variables to be an offset in OPTIMIZER_COSTS
+ offset= global_var_ptr() - (uchar*) &default_optimizer_costs;
+ SYSVAR_ASSERT(scope() == GLOBAL);
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ double new_value= var->save_result.double_value;
+ LEX_CSTRING *base_name= &var->base;
+ OPTIMIZER_COSTS *optimizer_costs;
+
+ /* If no basename, assume it's for the default costs */
+ if (!base_name->length)
+ base_name= &default_base;
+
+ mysql_mutex_lock(&LOCK_optimizer_costs);
+ if (!(optimizer_costs= get_or_create_optimizer_costs(base_name->str,
+ base_name->length)))
+ {
+ mysql_mutex_unlock(&LOCK_optimizer_costs);
+ return true;
+ }
+ cost_var(optimizer_costs, offset)= new_value / cost_adjust;
+ mysql_mutex_unlock(&LOCK_optimizer_costs);
+ return 0;
+ }
+ const uchar *global_value_ptr(THD *thd, const LEX_CSTRING *base) const
+ {
+ OPTIMIZER_COSTS *optimizer_costs= get_optimizer_costs(base);
+ if (!optimizer_costs)
+ optimizer_costs= &default_optimizer_costs;
+ thd->sys_var_tmp.double_value= cost_var(optimizer_costs, offset);
+ return tmp_ptr(thd);
+ }
+};
+
+
/**
The class for the @max_user_connections.
It's derived from Sys_var_uint, but non-standard session value
diff --git a/sql/table.cc b/sql/table.cc
index ec3a97cc454..9ba9817d587 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -751,10 +751,10 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
LEX_STRING *keynames)
{
uint i, j, n_length;
+ uint primary_key_parts= 0;
KEY_PART_INFO *key_part= NULL;
ulong *rec_per_key= NULL;
- KEY_PART_INFO *first_key_part= NULL;
- uint first_key_parts= 0;
+ DBUG_ASSERT(keyinfo == first_keyinfo);
if (!keys)
{
@@ -763,15 +763,15 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
bzero((char*) keyinfo, len);
key_part= reinterpret_cast<KEY_PART_INFO*> (keyinfo);
}
+ bzero((char*)first_keyinfo, sizeof(*first_keyinfo));
/*
- If share->use_ext_keys is set to TRUE we assume that any key
- can be extended by the components of the primary key whose
- definition is read first from the frm file.
- For each key only those fields of the assumed primary key are
- added that are not included in the proper key definition.
- If after all it turns out that there is no primary key the
- added components are removed from each key.
+ If share->use_ext_keys is set to TRUE we assume that any non-primary
+ key can be extended by the components of the primary key whose
+ definition is read first from the frm file.
+ This code only allocates space for the extended key information as
+ at this point we don't know if there is a primary key or not.
+ The extended key information is added in init_from_binary_frm_image().
When in the future we support others schemes of extending of
secondary keys with components of the primary key we'll have
@@ -804,26 +804,31 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (i == 0)
{
- (*ext_key_parts)+= (share->use_ext_keys ? first_keyinfo->user_defined_key_parts*(keys-1) : 0);
+ /*
+ Allocate space for keys. We have to do it here as we need to know
+ the number of user_defined_key_parts for the first key when doing
+ this.
+ */
+ primary_key_parts= first_keyinfo->user_defined_key_parts;
+ (*ext_key_parts)+= (share->use_ext_keys ?
+ primary_key_parts*(keys-1) :
+ 0);
n_length=keys * sizeof(KEY) + *ext_key_parts * sizeof(KEY_PART_INFO);
if (!(keyinfo= (KEY*) alloc_root(&share->mem_root,
n_length + len)))
return 1;
- bzero((char*) keyinfo,n_length);
share->key_info= keyinfo;
+
+ /* Copy first keyinfo, read above */
+ memcpy((char*) keyinfo, (char*) first_keyinfo, sizeof(*keyinfo));
+ bzero(((char*) keyinfo) + sizeof(*keyinfo), n_length - sizeof(*keyinfo));
+
key_part= reinterpret_cast<KEY_PART_INFO*> (keyinfo + keys);
if (!(rec_per_key= (ulong*) alloc_root(&share->mem_root,
sizeof(ulong) * *ext_key_parts)))
return 1;
- first_key_part= key_part;
- first_key_parts= first_keyinfo->user_defined_key_parts;
- keyinfo->flags= first_keyinfo->flags;
- keyinfo->key_length= first_keyinfo->key_length;
- keyinfo->user_defined_key_parts= first_keyinfo->user_defined_key_parts;
- keyinfo->algorithm= first_keyinfo->algorithm;
- if (new_frm_ver >= 3)
- keyinfo->block_size= first_keyinfo->block_size;
+ bzero((char*) rec_per_key, sizeof(*rec_per_key) * *ext_key_parts);
}
keyinfo->key_part= key_part;
@@ -833,7 +838,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (strpos + (new_frm_ver >= 1 ? 9 : 7) >= frm_image_end)
return 1;
if (!(keyinfo->algorithm == HA_KEY_ALG_LONG_HASH))
- *rec_per_key++=0;
+ rec_per_key++;
key_part->fieldnr= (uint16) (uint2korr(strpos) & FIELD_NR_MASK);
key_part->offset= (uint) uint2korr(strpos+2)-1;
key_part->key_type= (uint) uint2korr(strpos+5);
@@ -857,48 +862,33 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
}
key_part->store_length=key_part->length;
}
+
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
+ keyinfo->ext_key_flags= keyinfo->flags;
+ keyinfo->ext_key_part_map= 0;
+
if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{
+ /*
+        We should not increase keyinfo->ext_key_parts here; it will
+        later be changed to 1 because the engine will only see the
+        generated hash key.
+ */
keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
- key_part++; // reserved for the hash value
- *rec_per_key++=0;
+ key_part++; // This will be set to point to the hash key
+ rec_per_key++; // Only one rec_per_key needed for the hash
+ share->ext_key_parts++;
}
- /*
- Add primary key to end of extended keys for non unique keys for
- storage engines that supports it.
- */
- keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
- keyinfo->ext_key_flags= keyinfo->flags;
- keyinfo->ext_key_part_map= 0;
- if (share->use_ext_keys && i && !(keyinfo->flags & HA_NOSAME))
+ if (i && share->use_ext_keys && !((keyinfo->flags & HA_NOSAME)))
{
- for (j= 0;
- j < first_key_parts && keyinfo->ext_key_parts < MAX_REF_PARTS;
- j++)
- {
- uint key_parts= keyinfo->user_defined_key_parts;
- KEY_PART_INFO* curr_key_part= keyinfo->key_part;
- KEY_PART_INFO* curr_key_part_end= curr_key_part+key_parts;
- for ( ; curr_key_part < curr_key_part_end; curr_key_part++)
- {
- if (curr_key_part->fieldnr == first_key_part[j].fieldnr)
- break;
- }
- if (curr_key_part == curr_key_part_end)
- {
- *key_part++= first_key_part[j];
- *rec_per_key++= 0;
- keyinfo->ext_key_parts++;
- keyinfo->ext_key_part_map|= 1 << j;
- }
- }
- if (j == first_key_parts)
- keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME;
+ /* Reserve place for extended key parts */
+ key_part+= primary_key_parts;
+ rec_per_key+= primary_key_parts;
+ share->ext_key_parts+= primary_key_parts; // For copy_keys_from_share()
}
- if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
- share->ext_key_parts++;
share->ext_key_parts+= keyinfo->ext_key_parts;
+ DBUG_ASSERT(share->ext_key_parts <= *ext_key_parts);
}
keynames->str= (char*) key_part;
keynames->length= strnmov(keynames->str, (char *) strpos,
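
The space reserved above is easiest to see in isolation. The following is a minimal standalone sketch in plain C++; the struct, key layout and numbers are invented (this is not the server's KEY structure) and the real bookkeeping is deliberately simplified: every key contributes its user-defined parts, a LONG HASH key contributes one hidden hash part, and when the engine supports extended keys each secondary key additionally reserves room for all primary-key parts.

#include <cstdio>

// Back-of-the-envelope sketch of the reservation made by create_key_infos();
// invented layout, not the server's structures.
struct KeySketch
{
  unsigned user_parts;    // user-defined key parts
  bool     is_long_hash;  // HA_KEY_ALG_LONG_HASH in the real code
};

int main()
{
  // Invented table: PRIMARY KEY(a,b), UNIQUE(c) USING HASH, KEY(d), KEY(e,f)
  KeySketch keys[]= { {2, false}, {1, true}, {1, false}, {2, false} };
  const unsigned n_keys= sizeof(keys) / sizeof(keys[0]);
  const unsigned pk_parts= keys[0].user_parts;
  const bool use_ext_keys= true;            // engine supports extended keys

  unsigned ext_key_parts= 0;
  for (unsigned i= 0; i < n_keys; i++)
  {
    ext_key_parts+= keys[i].user_parts;     // user-defined parts
    if (keys[i].is_long_hash)
      ext_key_parts++;                      // hidden hash key part
  }
  if (use_ext_keys)
    ext_key_parts+= pk_parts * (n_keys - 1); // room for appended PK parts
  printf("KEY_PART_INFO slots to reserve: %u\n", ext_key_parts);
  return 0;
}
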
@@ -1304,10 +1294,10 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
for (key_index= 0; key_index < table->s->keys; key_index++)
{
key=table->key_info + key_index;
- parts= key->user_defined_key_parts;
+ parts= key->user_defined_key_parts;
if (key->key_part[parts].fieldnr == field->field_index + 1)
break;
- }
+ }
if (!key || key->algorithm != HA_KEY_ALG_LONG_HASH)
goto end;
KEY_PART_INFO *keypart;
@@ -1338,7 +1328,13 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
field->vcol_info->set_vcol_type(VCOL_USING_HASH);
if (v->fix_and_check_expr(thd, table))
goto end;
- key->user_defined_key_parts= key->ext_key_parts= key->usable_key_parts= 1;
+ /*
+      The hash key used by a unique constraint consists of one key_part.
+      It is stored in the key_parts array after the user-defined parts.
+      The engine will only see the hash.
+ */
+ key->user_defined_key_parts= key->usable_key_parts=
+ key->ext_key_parts= 1;
key->key_part+= parts;
if (key->flags & HA_NULL_PART_KEY)
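
As a rough illustration of the layout parse_vcol_defs() relies on here (toy C++ with invented names, not the server structures): the key part array of a hash-based unique key stores the user columns first and the generated hash column last, and the key descriptor handed to the engine is re-pointed at that last slot with a part count of one.

#include <cstdio>
#include <string>
#include <vector>

// Toy stand-in for KEY_PART_INFO, only to illustrate the layout.
struct PartSketch { std::string column; };

int main()
{
  // Invented example: UNIQUE KEY (long_text_col) USING HASH.
  // The key part array holds the user column first, the generated hash last.
  std::vector<PartSketch> parts= { {"long_text_col"}, {"generated_hash"} };

  // SQL layer view: one user-defined key part (the text column).
  size_t user_defined_key_parts= 1;

  // Engine view: key_part is advanced past the user-defined parts, so the
  // engine sees a single part, the hash.
  size_t engine_first_part= user_defined_key_parts;  // index into 'parts'
  printf("engine sees 1 key part: %s\n",
         parts[engine_first_part].column.c_str());
  return 0;
}
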
@@ -2061,7 +2057,12 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
next_chunk+= str_db_type_length + 2;
}
- share->set_use_ext_keys_flag(plugin_hton(se_plugin)->flags & HTON_SUPPORTS_EXTENDED_KEYS);
+ /*
+ Check if engine supports extended keys. This is used by
+ create_key_infos() to allocate room for extended keys
+ */
+ share->set_use_ext_keys_flag(plugin_hton(se_plugin)->flags &
+ HTON_SUPPORTS_EXTENDED_KEYS);
if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
new_frm_ver, &ext_key_parts,
@@ -2309,7 +2310,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->keynames.count != keys))
goto err;
- /* Allocate handler */
+ /* Allocate handler */
if (!(handler_file= get_new_handler(share, thd->mem_root,
plugin_hton(se_plugin))))
goto err;
@@ -2807,6 +2808,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
DBUG_ASSERT((null_pos + (null_bit_pos + 7) / 8) <= share->field[0]->ptr);
}
+ share->primary_key= MAX_KEY;
+
/* Fix key->name and key_part->field */
if (key_parts)
{
@@ -2827,7 +2830,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
hash_keypart->type= HA_KEYTYPE_ULONGLONG;
hash_keypart->key_part_flag= 0;
hash_keypart->key_type= 32834;
- /* Last n fields are unique_index_hash fields*/
+ /* Last n fields are unique_index_hash fields */
hash_keypart->offset= offset;
hash_keypart->fieldnr= hash_field_used_no + 1;
hash_field= share->field[hash_field_used_no];
@@ -2841,7 +2844,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
offset+= HA_HASH_FIELD_LENGTH;
}
}
- uint add_first_key_parts= 0;
longlong ha_option= handler_file->ha_table_flags();
keyinfo= share->key_info;
uint primary_key= my_strcasecmp(system_charset_info,
@@ -2911,33 +2913,85 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
goto err;
}
+ uint add_first_key_parts= 0;
if (share->use_ext_keys)
{
if (primary_key >= MAX_KEY)
- {
- add_first_key_parts= 0;
- share->set_use_ext_keys_flag(FALSE);
- }
+ share->set_use_ext_keys_flag(false);
else
{
- add_first_key_parts= first_keyinfo.user_defined_key_parts;
- /*
- Do not add components of the primary key starting from
- the major component defined over the beginning of a field.
- */
- for (i= 0; i < first_keyinfo.user_defined_key_parts; i++)
- {
+      /* Add the primary key to the end of all non-unique keys */
+
+ KEY *curr_keyinfo= keyinfo, *keyinfo_end= keyinfo+ keys;
+ KEY_PART_INFO *first_key_part= keyinfo->key_part;
+ uint first_key_parts= keyinfo->user_defined_key_parts;
+
+ /*
+ We are skipping the first key (primary key) as it cannot be
+        extended.
+ */
+ while (++curr_keyinfo < keyinfo_end)
+ {
+ uint j;
+ if (!(curr_keyinfo->flags & HA_NOSAME))
+ {
+ KEY_PART_INFO *key_part= (curr_keyinfo->key_part +
+ curr_keyinfo->user_defined_key_parts);
+
+ /* Extend key with primary key parts */
+ for (j= 0;
+ j < first_key_parts &&
+ curr_keyinfo->ext_key_parts < MAX_REF_PARTS;
+ j++)
+ {
+ uint key_parts= curr_keyinfo->user_defined_key_parts;
+ KEY_PART_INFO *curr_key_part= curr_keyinfo->key_part;
+ KEY_PART_INFO *curr_key_part_end= curr_key_part+key_parts;
+
+ for ( ; curr_key_part < curr_key_part_end; curr_key_part++)
+ {
+ if (curr_key_part->fieldnr == first_key_part[j].fieldnr)
+ break;
+ }
+ if (curr_key_part == curr_key_part_end)
+ {
+            /* Add a primary key part that is not yet part of the index */
+ *key_part++= first_key_part[j];
+ curr_keyinfo->ext_key_parts++;
+ curr_keyinfo->ext_key_part_map|= 1 << j;
+ }
+ }
+ if (j == first_key_parts)
+ {
+ /* Full primary key added to secondary keys makes it unique */
+ curr_keyinfo->ext_key_flags= curr_keyinfo->flags | HA_EXT_NOSAME;
+ }
+ }
+ }
+ add_first_key_parts= keyinfo->user_defined_key_parts;
+
+ /*
+        If a primary key part is a column prefix (partial key part), don't
+        use it, or any key part after it, for key extension.
+ */
+ for (i= 0; i < first_key_parts; i++)
+ {
uint fieldnr= keyinfo[0].key_part[i].fieldnr;
if (share->field[fieldnr-1]->key_length() !=
keyinfo[0].key_part[i].length)
- {
+ {
add_first_key_parts= i;
break;
}
}
- }
+ }
}
+  /* Primary key must be set early as the engine may use it in index_flags() */
+ share->primary_key= (primary_key < MAX_KEY &&
+ share->keys_in_use.is_set(primary_key) ?
+ primary_key : MAX_KEY);
+
key_first_info= keyinfo;
for (uint key=0 ; key < keys ; key++,keyinfo++)
{
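
The dedup-and-append logic in the loop above can be shown in a few lines of standalone C++ (field numbers invented; none of the server's structures are used): each primary-key column not already present in the secondary key is appended, and the bit of that primary-key part is recorded in the extension map.

#include <cstdio>
#include <vector>

int main()
{
  std::vector<int> pk_fields=  {1, 2};     // PRIMARY KEY(f1, f2)
  std::vector<int> sec_fields= {3, 1};     // non-unique KEY(f3, f1)

  std::vector<int> ext_fields= sec_fields; // extended key starts as the user key
  unsigned ext_map= 0;                     // which PK parts were appended

  for (size_t j= 0; j < pk_fields.size(); j++)
  {
    bool already_present= false;
    for (int f : sec_fields)
      if (f == pk_fields[j]) { already_present= true; break; }
    if (!already_present)
    {
      ext_fields.push_back(pk_fields[j]);  // append the missing PK column
      ext_map|= 1u << j;                   // remember which PK part was added
    }
  }
  // f1 is already in the key, so only f2 (bit 1) is appended; because every
  // PK column is now present, the extended key identifies rows uniquely.
  printf("extended key parts: %zu, ext_key_part_map: 0x%x\n",
         ext_fields.size(), ext_map);
  return 0;
}
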
@@ -3091,12 +3145,17 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (handler_file->index_flags(key, i, 0) & HA_KEYREAD_ONLY)
{
share->keys_for_keyread.set_bit(key);
+ /*
+ part_of_key is used to check if we can use the field
+            as part of a covering key (which implies HA_KEYREAD_ONLY).
+ */
field->part_of_key.set_bit(key);
- if (i < keyinfo->user_defined_key_parts)
- field->part_of_key_not_clustered.set_bit(key);
}
if (handler_file->index_flags(key, i, 1) & HA_READ_ORDER)
field->part_of_sortkey.set_bit(key);
+
+ if (i < keyinfo->user_defined_key_parts)
+ field->part_of_key_not_clustered.set_bit(key);
}
if (!(key_part->key_part_flag & HA_REVERSE_SORT) &&
usable_parts == i)
@@ -3180,7 +3239,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (primary_key < MAX_KEY &&
(share->keys_in_use.is_set(primary_key)))
{
- share->primary_key= primary_key;
+ DBUG_ASSERT(share->primary_key == primary_key);
/*
If we are using an integer as the primary key then allow the user to
refer to it as '_rowid'
@@ -3197,10 +3256,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
}
else
- share->primary_key = MAX_KEY; // we do not have a primary key
+ {
+ DBUG_ASSERT(share->primary_key == MAX_KEY);
+ }
}
- else
- share->primary_key= MAX_KEY;
if (new_field_pack_flag <= 1)
{
/* Old file format with default as not null */
@@ -3428,6 +3487,27 @@ err:
}
+/*
+ Make a copy of optimizer costs to be able to access these without any locks
+ and to allow the engine to update costs.
+*/
+
+void TABLE_SHARE::update_optimizer_costs(handlerton *hton)
+{
+ if (hton != view_pseudo_hton && !(hton->flags & HTON_HIDDEN))
+ {
+ mysql_mutex_lock(&LOCK_optimizer_costs);
+ memcpy(&optimizer_costs, hton->optimizer_costs, sizeof(optimizer_costs));
+ mysql_mutex_unlock(&LOCK_optimizer_costs);
+ }
+ else
+ {
+ bzero(&optimizer_costs, sizeof(optimizer_costs));
+ MEM_UNDEFINED(&optimizer_costs, sizeof(optimizer_costs));
+ }
+}
+
+
static bool sql_unusable_for_discovery(THD *thd, handlerton *engine,
const char *sql)
{
@@ -4036,6 +4116,11 @@ static void print_long_unique_table(TABLE *table)
}
#endif
+
+/**
+ Copy key information from TABLE_SHARE to TABLE
+*/
+
bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root)
{
TABLE_SHARE *share= outparam->s;
@@ -4045,14 +4130,16 @@ bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root)
KEY_PART_INFO *key_part;
if (!multi_alloc_root(root, &key_info, share->keys*sizeof(KEY),
- &key_part, share->ext_key_parts*sizeof(KEY_PART_INFO),
+ &key_part,
+ share->ext_key_parts*sizeof(KEY_PART_INFO),
NullS))
return 1;
outparam->key_info= key_info;
memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
- memcpy(key_part, key_info->key_part, sizeof(*key_part)*share->ext_key_parts);
+ memcpy(key_part, key_info->key_part,
+ sizeof(*key_part)*share->ext_key_parts);
my_ptrdiff_t adjust_ptrs= PTR_BYTE_DIFF(key_part, key_info->key_part);
for (key_info_end= key_info + share->keys ;
@@ -4063,22 +4150,44 @@ bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root)
key_info->key_part= reinterpret_cast<KEY_PART_INFO*>
(reinterpret_cast<char*>(key_info->key_part) + adjust_ptrs);
if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ /*
+        From the user's point of view this key is unique.
+        However, from the engine's point of view the value is not unique,
+        as there can be hash collisions.
+ */
key_info->flags&= ~HA_NOSAME;
+ }
}
+
+ /*
+ We have to copy key parts separately as LONG HASH has invisible
+ key parts not seen by key_info
+ */
for (KEY_PART_INFO *key_part_end= key_part+share->ext_key_parts;
key_part < key_part_end;
key_part++)
{
- Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
- if (field->key_length() != key_part->length &&
- !(field->flags & BLOB_FLAG))
+ /*
+      key_part->field is not set for key_parts that are not used here.
+      This can happen with extended keys where a secondary key already
+      contains the primary key columns. In that case no key_info refers
+      to this key_part, but it can still be part of the memory region of
+      share->key_part.
+ */
+ if (key_part->field)
{
- /*
- We are using only a prefix of the column as a key:
- Create a new field for the key part that matches the index
- */
- field= key_part->field=field->make_new_field(root, outparam, 0);
- field->field_length= key_part->length;
+ Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
+ if (field->key_length() != key_part->length &&
+ !(field->flags & BLOB_FLAG))
+ {
+ /*
+ We are using only a prefix of the column as a key:
+ Create a new field for the key part that matches the index
+ */
+ field= key_part->field=field->make_new_field(root, outparam, 0);
+ field->field_length= key_part->length;
+ }
}
}
}
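
The adjust_ptrs arithmetic above is a general relocation pattern: after memcpy'ing descriptors whose members point into another copied array, every such member is shifted by the byte distance between the old and the new array. A minimal standalone sketch with simplified structures (not the server code):

#include <cstddef>
#include <cstdio>
#include <cstring>

struct Part { int fieldnr; };
struct Key  { Part *part; int n_parts; };

int main()
{
  Part shared_parts[3]= { {1}, {2}, {3} };
  Key  shared_keys[2]=  { {shared_parts, 2}, {shared_parts + 2, 1} };

  Part copied_parts[3];
  Key  copied_keys[2];
  memcpy(copied_parts, shared_parts, sizeof(shared_parts));
  memcpy(copied_keys,  shared_keys,  sizeof(shared_keys));

  // Same idea as PTR_BYTE_DIFF(key_part, key_info->key_part) above.
  std::ptrdiff_t adjust= (char*) copied_parts - (char*) shared_parts;
  for (Key &k : copied_keys)
    k.part= (Part*) ((char*) k.part + adjust);

  printf("second copied key starts at field %d\n",
         copied_keys[1].part[0].fieldnr);
  return 0;
}
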
@@ -4320,15 +4429,15 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
for (uint k= 0; k < share->keys; k++)
{
- KEY &key_info= outparam->key_info[k];
- uint parts = (share->use_ext_keys ? key_info.ext_key_parts :
- key_info.user_defined_key_parts);
- for (uint p= 0; p < parts; p++)
+ KEY *key_info= &outparam->key_info[k];
+ uint parts= (share->use_ext_keys ? key_info->ext_key_parts :
+ key_info->user_defined_key_parts);
+ for (uint p=0; p < parts; p++)
{
- KEY_PART_INFO &kp= key_info.key_part[p];
- if (kp.field != outparam->field[kp.fieldnr - 1])
+ KEY_PART_INFO *kp= &key_info->key_part[p];
+ if (kp->field != outparam->field[kp->fieldnr - 1])
{
- kp.field->vcol_info = outparam->field[kp.fieldnr - 1]->vcol_info;
+ kp->field->vcol_info= outparam->field[kp->fieldnr - 1]->vcol_info;
}
}
}
@@ -5687,6 +5796,13 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
opt_range_condition_rows=0;
no_cache= false;
initialize_opt_range_structures();
+
+ /*
+    Update optimizer_costs to ensure that SET STATEMENT of the cost
+    variables will work.
+ */
+ file->set_optimizer_costs(thd);
+
#ifdef HAVE_REPLICATION
/* used in RBR Triggers */
master_had_triggers= 0;
@@ -7391,7 +7507,7 @@ MY_BITMAP *TABLE::prepare_for_keyread(uint index, MY_BITMAP *map)
DBUG_ENTER("TABLE::prepare_for_keyread");
if (!no_keyread)
file->ha_start_keyread(index);
- if (map != read_set || !(file->index_flags(index, 0, 1) & HA_CLUSTERED_INDEX))
+ if (map != read_set || !is_clustering_key(index))
{
mark_index_columns(index, map);
column_bitmaps_set(map);
@@ -8092,7 +8208,7 @@ void TABLE::restore_blob_values(String *blob_storage)
@param key_count number of keys to allocate additionally
@details
- The function allocates memory to fit additionally 'key_count' keys
+ The function allocates memory to fit additionally 'key_count' keys
for this table.
@return FALSE space was successfully allocated
@@ -8277,18 +8393,25 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
DBUG_ASSERT(key < max_keys);
char buf[NAME_CHAR_LEN];
- KEY* keyinfo;
+ KEY *keyinfo= key_info + key;
+ KEY_PART_INFO *key_part_info;
Field **reg_field;
uint i;
-
bool key_start= TRUE;
- KEY_PART_INFO* key_part_info=
- (KEY_PART_INFO*) alloc_root(&mem_root, sizeof(KEY_PART_INFO)*key_parts);
- if (!key_part_info)
+
+ keyinfo->name.length= sprintf(buf, "key%i", key);
+
+ if (!multi_alloc_root(&mem_root,
+ &key_part_info, sizeof(KEY_PART_INFO)*key_parts,
+ &keyinfo->rec_per_key,
+ sizeof(key_info->rec_per_key) * key_parts,
+ &keyinfo->name.str, keyinfo->name.length+1,
+ NullS))
return TRUE;
- keyinfo= key_info + key;
keyinfo->key_part= key_part_info;
- keyinfo->usable_key_parts= keyinfo->user_defined_key_parts = key_parts;
+ strmake((char*) keyinfo->name.str, buf, keyinfo->name.length);
+
+ keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= key_parts;
keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->key_length=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
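
The multi_alloc_root() call above replaces three separate allocations with one block carved into pieces. A generic sketch of the same pattern, using plain malloc and made-up piece types (the server carves the pieces out of a MEM_ROOT instead):

#include <cstdio>
#include <cstdlib>
#include <cstring>

int main()
{
  const size_t key_parts= 3;
  const size_t name_len=  sizeof("key42");                 // includes the '\0'
  const size_t sz_rpk=    key_parts * sizeof(unsigned long); // rec_per_key
  const size_t sz_parts=  key_parts * sizeof(int);          // stand-in for key parts

  // Largest alignment first, the string last.
  char *block= (char*) malloc(sz_rpk + sz_parts + name_len);
  if (!block)
    return 1;
  unsigned long *rec_per_key= (unsigned long*) block;
  int           *parts=       (int*) (block + sz_rpk);
  char          *name=        block + sz_rpk + sz_parts;

  memset(rec_per_key, 0, sz_rpk);
  memset(parts, 0, sz_parts);
  memcpy(name, "key42", name_len);
  printf("key %s: %zu parts carved from one %zu-byte block\n",
         name, key_parts, sz_rpk + sz_parts + name_len);
  free(block);
  return 0;
}
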
@@ -8297,14 +8420,6 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
keyinfo->is_statistics_from_stat_tables= FALSE;
if (unique)
keyinfo->flags|= HA_NOSAME;
- sprintf(buf, "key%i", key);
- keyinfo->name.length= strlen(buf);
- if (!(keyinfo->name.str= strmake_root(&mem_root, buf, keyinfo->name.length)))
- return TRUE;
- keyinfo->rec_per_key= (ulong*) alloc_root(&mem_root,
- sizeof(ulong)*key_parts);
- if (!keyinfo->rec_per_key)
- return TRUE;
bzero(keyinfo->rec_per_key, sizeof(ulong)*key_parts);
keyinfo->read_stats= NULL;
keyinfo->collected_stats= NULL;
@@ -8322,6 +8437,11 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
key_start= FALSE;
key_part_info++;
}
+ /*
+ We have to cache index_flags here as the table may be used by the
+ optimizer before it's opened.
+ */
+ keyinfo->index_flags= file->index_flags(key, 0, 1);
/*
For the case when there is a derived table that would give distinct rows,
@@ -8345,33 +8465,66 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
set_if_bigger(s->max_key_length, keyinfo->key_length);
s->keys++;
+ s->ext_key_parts+= keyinfo->ext_key_parts;
+ s->key_parts+= keyinfo->user_defined_key_parts;
return FALSE;
}
/*
@brief
- Drop all indexes except specified one.
+    Drop all indexes except the specified one and unique keys.
- @param key_to_save the key to save
+ @param key_to_save The key to save
+ @param map_to_update Bitmap showing some of the table's keys. Update it
+ to show the same keys, if they are not dropped.
@details
- Drop all indexes on this table except 'key_to_save'. The saved key becomes
- key #0. Memory occupied by key parts of dropped keys are freed.
- If the 'key_to_save' is negative then all keys are freed.
+ Drop all indexes on this table except 'key_to_save' and unique keys.
+
+ The saved key becomes key #0. If key_to_save=-1 then only unique keys
+ remain.
*/
-void TABLE::use_index(int key_to_save)
+void TABLE::use_index(int key_to_save, key_map *map_to_update)
{
- uint i= 1;
DBUG_ASSERT(!created && key_to_save < (int)s->keys);
- if (key_to_save >= 0)
- /* Save the given key. */
- memmove(key_info, key_info + key_to_save, sizeof(KEY));
- else
- /* Drop all keys; */
- i= 0;
+ uint saved_keys= 0, key_parts= 0;
+ key_map new_bitmap;
+ new_bitmap.clear_all();
- s->keys= i;
+ /*
+ If we have key_to_save, move it to be key#0.
+ */
+ if (key_to_save != -1)
+ {
+ new_bitmap.set_bit(saved_keys);
+
+ KEY tmp_buff= key_info[saved_keys];
+ key_info[saved_keys]= key_info[key_to_save];
+ key_info[key_to_save]= tmp_buff;
+ key_parts= key_info[saved_keys].user_defined_key_parts;
+ saved_keys++;
+ }
+
+ /*
+ Now, move all unique keys to the front.
+ */
+ for (uint i= saved_keys; i < s->keys; i++)
+ {
+ if (key_info[i].flags & HA_NOSAME)
+ {
+ if (map_to_update->is_set(i))
+ new_bitmap.set_bit(saved_keys);
+ if (i != saved_keys)
+ key_info[saved_keys]= key_info[i];
+ key_parts+= key_info[saved_keys].user_defined_key_parts;
+ saved_keys++;
+ }
+ }
+ *map_to_update= new_bitmap;
+ s->keys= saved_keys;
+ s->key_parts= s->ext_key_parts= key_parts;
}
/*
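
What use_index() now does is a compaction: keep the chosen key plus all unique keys, move them to the front, and rebuild the caller's bitmap so each surviving bit points at a key's new position. A standalone sketch with invented keys and bitmap (not the server's key_map):

#include <cstdio>
#include <string>
#include <vector>

struct KeySketch { std::string name; bool is_unique; };

int main()
{
  std::vector<KeySketch> keys= { {"k0", false}, {"k1", true},
                                 {"k2", false}, {"k3", true} };
  const size_t key_to_save= 2;        // becomes key #0
  unsigned old_map= 0b1010;           // caller cares about k1 and k3
  unsigned new_map= 0;

  std::vector<KeySketch> kept;
  new_map|= 1u << kept.size();        // the saved key is always marked
  kept.push_back(keys[key_to_save]);
  for (size_t i= 0; i < keys.size(); i++)
  {
    if (i == key_to_save || !keys[i].is_unique)
      continue;
    if (old_map & (1u << i))
      new_map|= 1u << kept.size();    // same key, new position
    kept.push_back(keys[i]);
  }
  // k2 -> position 0, k1 -> 1, k3 -> 2; new_map becomes 0x7.
  printf("%zu keys kept, new map 0x%x\n", kept.size(), new_map);
  return 0;
}
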
@@ -8641,18 +8794,19 @@ bool TABLE_LIST::process_index_hints(TABLE *tbl)
index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]);
}
- /*
- TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified)
- and create tbl->force_index_join instead.
- Then use the correct force_index_XX instead of the global one.
- */
- if (!index_join[INDEX_HINT_FORCE].is_clear_all() ||
- tbl->force_index_group || tbl->force_index_order)
+ if (!index_join[INDEX_HINT_FORCE].is_clear_all())
{
- tbl->force_index= TRUE;
+ tbl->force_index_join= TRUE;
index_join[INDEX_HINT_USE].merge(index_join[INDEX_HINT_FORCE]);
}
+ /*
+ TODO: get rid of tbl->force_index (on if any FORCE INDEX is specified)
+ Use the correct force_index_XX in all places instead of the global one.
+ */
+ tbl->force_index= (tbl->force_index_order | tbl->force_index_group |
+ tbl->force_index_join);
+
/* apply USE INDEX */
if (!index_join[INDEX_HINT_USE].is_clear_all() || have_empty_use_join)
tbl->keys_in_use_for_query.intersect(index_join[INDEX_HINT_USE]);
@@ -10439,10 +10593,27 @@ bool TABLE::export_structure(THD *thd, Row_definition_list *defs)
inline void TABLE::initialize_opt_range_structures()
{
TRASH_ALLOC((void*)&opt_range_keys, sizeof(opt_range_keys));
- TRASH_ALLOC(opt_range, s->keys * sizeof(*opt_range));
+ TRASH_ALLOC((void*)opt_range, s->keys * sizeof(*opt_range));
TRASH_ALLOC(const_key_parts, s->keys * sizeof(*const_key_parts));
}
+
+double TABLE::OPT_RANGE::index_only_fetch_cost(TABLE *table)
+{
+ return (table->file->cost(cost.index_cost)+
+ (double) rows * table->s->optimizer_costs.key_copy_cost);
+}
+
+void TABLE::OPT_RANGE::get_costs(ALL_READ_COST *res)
+{
+ res->index_cost= cost.index_cost;
+ res->row_cost= cost.row_cost;
+ res->copy_cost= cost.copy_cost;
+ res->max_index_blocks= max_index_blocks;
+ res->max_row_blocks= max_row_blocks;
+}
+
+
/*
Mark table to be reopened after query
*/
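
The index_only_fetch_cost() formula above is simply the engine's index scan cost plus a per-row key copy charge. A worked example with invented numbers (the real constants come from the engine's OPTIMIZER_COSTS and the range optimizer's estimates):

#include <cstdio>

int main()
{
  double index_cost=    2.5;     // engine cost of scanning the index range
  double rows=          1000.0;  // estimated rows in the range
  double key_copy_cost= 0.00015; // cost of handing one key to the SQL layer
  printf("index-only fetch cost: %.4f\n", index_cost + rows * key_copy_cost);
  return 0;
}
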
diff --git a/sql/table.h b/sql/table.h
index eda7f504e4f..d9c1231db6a 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -93,6 +93,7 @@ typedef ulonglong nested_join_map;
#define tmp_file_prefix "#sql" /**< Prefix for tmp tables */
#define tmp_file_prefix_length 4
#define TMP_TABLE_KEY_EXTRA 8
+#define ROCKSDB_DIRECTORY_NAME "#rocksdb"
/**
Enumerate possible types of a table from re-execution
@@ -813,6 +814,7 @@ struct TABLE_SHARE
return is_view ? view_pseudo_hton :
db_plugin ? plugin_hton(db_plugin) : NULL;
}
+ OPTIMIZER_COSTS optimizer_costs; /* Copy of get_optimizer_costs() */
enum row_type row_type; /* How rows are stored */
enum Table_type table_type;
enum tmp_table_type tmp_table;
@@ -888,6 +890,7 @@ struct TABLE_SHARE
bool has_update_default_function;
bool can_do_row_logging; /* 1 if table supports RBR */
bool long_unique_table;
+ bool optimizer_costs_inited;
ulong table_map_id; /* for row-based replication */
@@ -1209,6 +1212,8 @@ struct TABLE_SHARE
Item_func_hash *make_long_hash_func(THD *thd,
MEM_ROOT *mem_root,
List<Item> *field_list) const;
+
+ void update_optimizer_costs(handlerton *hton);
};
/* not NULL, but cannot be dereferenced */
@@ -1406,13 +1411,18 @@ public:
{
uint key_parts;
uint ranges;
- ha_rows rows;
- double cost;
+ ha_rows rows, max_index_blocks, max_row_blocks;
+ Cost_estimate cost;
+ /* Selectivity, in case of filters */
+ double selectivity;
+ bool first_key_part_has_only_one_value;
+
/*
- If there is a range access by i-th index then the cost of
- index only access for it is stored in index_only_costs[i]
+ Cost of fetching keys with index only read and returning them to the
+ sql level.
*/
- double index_only_cost;
+ double index_only_fetch_cost(TABLE *table);
+ void get_costs(ALL_READ_COST *cost);
} *opt_range;
/*
Bitmaps of key parts that =const for the duration of join execution. If
@@ -1499,6 +1509,9 @@ public:
*/
bool force_index;
+ /* Flag set when the statement contains FORCE INDEX FOR JOIN */
+ bool force_index_join;
+
/**
Flag set when the statement contains FORCE INDEX FOR ORDER BY
See TABLE_LIST::process_index_hints().
@@ -1681,7 +1694,7 @@ public:
bool unique);
void create_key_part_by_field(KEY_PART_INFO *key_part_info,
Field *field, uint fieldnr);
- void use_index(int key_to_save);
+ void use_index(int key_to_save, key_map *map_to_update);
void set_table_map(table_map map_arg, uint tablenr_arg)
{
map= map_arg;
@@ -1734,6 +1747,12 @@ public:
uint actual_n_key_parts(KEY *keyinfo);
ulong actual_key_flags(KEY *keyinfo);
int update_virtual_field(Field *vf, bool ignore_warnings);
+ inline size_t key_storage_length(uint index)
+ {
+ if (is_clustering_key(index))
+ return s->stored_rec_length;
+ return key_info[index].key_length + file->ref_length;
+ }
int update_virtual_fields(handler *h, enum_vcol_update_mode update_mode);
int update_default_fields(bool ignore_errors);
void evaluate_update_default_function();
@@ -1798,10 +1817,12 @@ public:
void prune_range_rowid_filters();
void trace_range_rowid_filters(THD *thd) const;
Range_rowid_filter_cost_info *
- best_range_rowid_filter_for_partial_join(uint access_key_no,
- double records,
- double access_cost_factor);
-
+ best_range_rowid_filter(uint access_key_no,
+ double records,
+ double fetch_cost,
+ double index_only_cost,
+ double prev_records,
+ double *records_out);
/**
System Versioning support
*/
@@ -1854,7 +1875,44 @@ public:
DBUG_ASSERT(s->period.name);
return field[s->period.end_fieldno];
}
+ inline void set_cond_selectivity(double selectivity)
+ {
+ DBUG_ASSERT(selectivity >= 0.0 && selectivity <= 1.0);
+ cond_selectivity= selectivity;
+ DBUG_PRINT("info", ("cond_selectivity: %g", cond_selectivity));
+ }
+ inline void multiply_cond_selectivity(double selectivity)
+ {
+ DBUG_ASSERT(selectivity >= 0.0 && selectivity <= 1.0);
+ cond_selectivity*= selectivity;
+ DBUG_PRINT("info", ("cond_selectivity: %g", cond_selectivity));
+ }
+ inline void set_opt_range_condition_rows(ha_rows rows)
+ {
+ if (opt_range_condition_rows > rows)
+ opt_range_condition_rows= rows;
+ }
+
+ /* Return true if the key is a clustered key */
+ inline bool is_clustering_key(uint index) const
+ {
+ return key_info[index].index_flags & HA_CLUSTERED_INDEX;
+ }
+ /*
+    Return true if a rowid filter can be used with this index.
+    A rowid filter can be used if:
+    - filter pushdown is supported by the engine for the index; in that
+      case file->ha_table_flags() should not contain HA_NON_COMPARABLE_ROWID
+    - the index is not a clustered primary index
+ */
+
+ inline bool can_use_rowid_filter(uint index) const
+ {
+ return ((key_info[index].index_flags &
+ (HA_DO_RANGE_FILTER_PUSHDOWN | HA_CLUSTERED_INDEX)) ==
+ HA_DO_RANGE_FILTER_PUSHDOWN);
+ }
ulonglong vers_start_id() const;
ulonglong vers_end_id() const;
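
The test in can_use_rowid_filter() uses a single mask-and-compare to require one flag and forbid the other. A small sketch with invented stand-in flag values:

#include <cstdio>

int main()
{
  const unsigned PUSHDOWN=  1u << 0;  // stands in for HA_DO_RANGE_FILTER_PUSHDOWN
  const unsigned CLUSTERED= 1u << 1;  // stands in for HA_CLUSTERED_INDEX

  const unsigned cases[]= { PUSHDOWN, PUSHDOWN | CLUSTERED, 0u };
  for (unsigned flags : cases)
  {
    // True only when PUSHDOWN is set and CLUSTERED is clear.
    bool usable= (flags & (PUSHDOWN | CLUSTERED)) == PUSHDOWN;
    printf("index_flags=0x%x -> rowid filter %s\n", flags, usable ? "yes" : "no");
  }
  return 0;
}
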
@@ -1951,7 +2009,8 @@ class IS_table_read_plan;
#define DTYPE_MERGE 4U
#define DTYPE_MATERIALIZE 8U
#define DTYPE_MULTITABLE 16U
-#define DTYPE_MASK (DTYPE_VIEW|DTYPE_TABLE|DTYPE_MULTITABLE)
+#define DTYPE_IN_PREDICATE 32U
+#define DTYPE_MASK (DTYPE_VIEW|DTYPE_TABLE|DTYPE_MULTITABLE|DTYPE_IN_PREDICATE)
/*
Phases of derived tables/views handling, see sql_derived.cc
@@ -2598,9 +2657,8 @@ struct TABLE_LIST
uint outer_join; /* Which join type */
uint shared; /* Used in multi-upd */
bool updatable; /* VIEW/TABLE can be updated now */
- bool straight; /* optimize with prev table */
+ bool straight; /* optimize with prev table */
bool updating; /* for replicate-do/ignore table */
- bool force_index; /* prefer index over table scan */
bool ignore_leaves; /* preload only non-leaf nodes */
bool crashed; /* Table was found crashed */
bool skip_locked; /* Skip locked in view defination */
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 8e1a8805d96..a2add055deb 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -29,6 +29,7 @@
#pragma implementation // gcc: Class implementation
#endif
+#define VER "1.1"
#include "mariadb.h"
#if !defined(TZINFO2SQL) && !defined(TESTTIME)
#include "sql_priv.h"
@@ -43,6 +44,7 @@
#include <my_getopt.h>
#endif
+#include <welcome_copyright_notice.h>
#include "tztime.h"
#include "tzfile.h"
#include <m_string.h>
@@ -64,8 +66,6 @@
#endif /* !defined(DBUG_OFF) */
#endif /* defined(TZINFO2SQL) || defined(TESTTIME) */
-#define PROGRAM_VERSION "1.1"
-
/* Structure describing local time type (e.g. Moscow summer time (MSD)) */
typedef struct ttinfo
{
@@ -1623,6 +1623,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
DBUG_RETURN(1);
thd->thread_stack= (char*) &thd;
thd->store_globals();
+ thd->set_query_inner((char*) STRING_WITH_LEN("intern:my_tz_init"),
+ default_charset_info);
/* Init all memory structures that require explicit destruction */
if (my_hash_init(key_memory_tz_storage, &tz_names, &my_charset_latin1, 20, 0,
@@ -2666,12 +2668,6 @@ static my_bool get_one_option(const struct my_option *, const char *,
const char *);
C_MODE_END
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname, PROGRAM_VERSION,
- MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
-}
-
static const char *default_timezone_dir= "/usr/share/zoneinfo/";
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 572d80f0b64..36725e80a6b 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -103,7 +103,7 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
if (!max_elements)
max_elements= 1;
- (void) open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
+ (void) open_cached_file(&file, mysql_tmpdir, TEMP_PREFIX, DISK_CHUNK_SIZE,
MYF(MY_WME));
}
@@ -156,10 +156,10 @@ inline double log2_n_fact(double x)
the same length, so each of total_buf_size elements will be added to a sort
heap with (n_buffers-1) elements. This gives the comparison cost:
- total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID;
+ total_buf_elems* log2(n_buffers) * ROWID_COMPARE_COST;
*/
-static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
+static double get_merge_buffers_cost(THD *thd, uint *buff_elems, uint elem_size,
uint *first, uint *last,
double compare_factor)
{
@@ -170,9 +170,9 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
size_t n_buffers= last - first + 1;
- /* Using log2(n)=log(n)/log(2) formula */
- return 2*((double)total_buf_elems*elem_size) / IO_SIZE +
- total_buf_elems*log((double) n_buffers) / (compare_factor * M_LN2);
+ return (2*((double)total_buf_elems*elem_size) / IO_SIZE *
+ default_optimizer_costs.disk_read_cost +
+ total_buf_elems*log2((double) n_buffers) * compare_factor);
}
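
The rewritten merge cost above has two terms: an I/O term scaled by an explicit disk_read_cost and a comparison term multiplied by a per-comparison cost (previously it was divided by a compare factor). A worked example with invented constants:

#include <cmath>
#include <cstdio>

int main()
{
  double total_buf_elems= 100000;   // elements across the merged buffers
  double elem_size=       16;       // bytes per element
  double io_size=         4096;     // bytes transferred per I/O (IO_SIZE)
  double disk_read_cost=  0.0004;   // invented per-I/O cost
  double compare_cost=    0.00005;  // invented per-comparison cost
  double n_buffers=       7;

  double io_cost=  2 * (total_buf_elems * elem_size) / io_size * disk_read_cost;
  double cmp_cost= total_buf_elems * std::log2(n_buffers) * compare_cost;
  printf("merge cost: %.4f (io %.4f + compare %.4f)\n",
         io_cost + cmp_cost, io_cost, cmp_cost);
  return 0;
}
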
@@ -185,6 +185,7 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
SYNOPSIS
get_merge_many_buffs_cost()
+ thd THD, used to get disk_read_cost
buffer buffer space for temporary data, at least
Unique::get_cost_calc_buff_size bytes
maxbuffer # of full buffers
@@ -203,7 +204,8 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size,
Cost of merge in disk seeks.
*/
-static double get_merge_many_buffs_cost(uint *buffer,
+static double get_merge_many_buffs_cost(THD *thd,
+ uint *buffer,
uint maxbuffer, uint max_n_elems,
uint last_n_elems, int elem_size,
double compare_factor)
@@ -231,13 +233,13 @@ static double get_merge_many_buffs_cost(uint *buffer,
uint lastbuff= 0;
for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF)
{
- total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
+ total_cost+=get_merge_buffers_cost(thd, buff_elems, elem_size,
buff_elems + i,
buff_elems + i + MERGEBUFF-1,
compare_factor);
lastbuff++;
}
- total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
+ total_cost+=get_merge_buffers_cost(thd, buff_elems, elem_size,
buff_elems + i,
buff_elems + maxbuffer,
compare_factor);
@@ -246,7 +248,7 @@ static double get_merge_many_buffs_cost(uint *buffer,
}
/* Simulate final merge_buff call. */
- total_cost += get_merge_buffers_cost(buff_elems, elem_size,
+ total_cost += get_merge_buffers_cost(thd, buff_elems, elem_size,
buff_elems, buff_elems + maxbuffer,
compare_factor);
return total_cost;
@@ -304,7 +306,7 @@ static double get_merge_many_buffs_cost(uint *buffer,
these will be random seeks.
*/
-double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
+double Unique::get_use_cost(THD *thd, uint *buffer, size_t nkeys, uint key_size,
size_t max_in_memory_size,
double compare_factor,
bool intersect_fl, bool *in_memory)
@@ -312,7 +314,7 @@ double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
size_t max_elements_in_tree;
size_t last_tree_elems;
size_t n_full_trees; /* number of trees in unique - 1 */
- double result;
+ double result, disk_read_cost;
max_elements_in_tree= ((size_t) max_in_memory_size /
ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
@@ -327,7 +329,7 @@ double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
result= 2*log2_n_fact(last_tree_elems + 1.0);
if (n_full_trees)
result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0);
- result /= compare_factor;
+ result *= compare_factor;
DBUG_PRINT("info",("unique trees sizes: %u=%u*%u + %u", (uint)nkeys,
(uint)n_full_trees,
@@ -345,14 +347,15 @@ double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
First, add cost of writing all trees to disk, assuming that all disk
writes are sequential.
*/
- result += DISK_SEEK_BASE_COST * n_full_trees *
- ceil(((double) key_size)*max_elements_in_tree / IO_SIZE);
- result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE);
+ disk_read_cost= default_optimizer_costs.disk_read_cost;
+ result += disk_read_cost * n_full_trees *
+ ceil(((double) key_size)*max_elements_in_tree / DISK_CHUNK_SIZE);
+ result += disk_read_cost * ceil(((double) key_size)*last_tree_elems / DISK_CHUNK_SIZE);
/* Cost of merge */
if (intersect_fl)
key_size+= sizeof(element_count);
- double merge_cost= get_merge_many_buffs_cost(buffer, (uint)n_full_trees,
+ double merge_cost= get_merge_many_buffs_cost(thd, buffer, (uint)n_full_trees,
(uint)max_elements_in_tree,
(uint)last_tree_elems, key_size,
compare_factor);
@@ -361,7 +364,7 @@ double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size,
Add cost of reading the resulting sequence, assuming there were no
duplicate elements.
*/
- result += ceil((double)key_size*nkeys/IO_SIZE);
+ result+= (ceil((double)key_size*nkeys/IO_SIZE) * disk_read_cost);
return result;
}
@@ -716,12 +719,12 @@ bool Unique::merge(TABLE *table, uchar *buff, size_t buff_size,
/* Open cached file for table records if it isn't open */
if (! my_b_inited(outfile) &&
- open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
+ open_cached_file(outfile, mysql_tmpdir, TEMP_PREFIX, DISK_CHUNK_SIZE,
MYF(MY_WME)))
return 1;
bzero((char*) &sort_param,sizeof(sort_param));
- sort_param.max_rows= elements;
+ sort_param.limit_rows= elements;
sort_param.sort_form= table;
sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
full_size;
diff --git a/sql/uniques.h b/sql/uniques.h
index 7e12a391fbd..ecc49794efe 100644
--- a/sql/uniques.h
+++ b/sql/uniques.h
@@ -75,10 +75,10 @@ public:
inline static double get_search_cost(ulonglong tree_elems,
double compare_factor)
{
- return log((double) tree_elems) / (compare_factor * M_LN2);
+ return log((double) tree_elems) * compare_factor / M_LN2;
}
- static double get_use_cost(uint *buffer, size_t nkeys, uint key_size,
+ static double get_use_cost(THD *thd, uint *buffer, size_t nkeys, uint key_size,
size_t max_in_memory_size, double compare_factor,
bool intersect_fl, bool *in_memory);
inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size,
diff --git a/sql/winservice.c b/sql/winservice.c
index d4e3bb0944d..c5fba81051c 100644
--- a/sql/winservice.c
+++ b/sql/winservice.c
@@ -305,19 +305,19 @@ int get_mysql_service_properties(const wchar_t *bin_path,
}
/*
- If version could not be determined so far, try mysql_upgrade_info in
+ If version could not be determined so far, try mariadb_upgrade_info in
database directory.
*/
if(props->version_major == 0)
{
char buf[MAX_PATH];
- FILE *mysql_upgrade_info;
+ FILE *mariadb_upgrade_info;
- sprintf_s(buf, MAX_PATH, "%s\\mysql_upgrade_info", props->datadir);
- mysql_upgrade_info= fopen(buf, "r");
- if(mysql_upgrade_info)
+ sprintf_s(buf, MAX_PATH, "%s\\mariadb_upgrade_info", props->datadir);
+ mariadb_upgrade_info= fopen(buf, "r");
+ if(mariadb_upgrade_info)
{
- if (fgets(buf, MAX_PATH, mysql_upgrade_info))
+ if (fgets(buf, MAX_PATH, mariadb_upgrade_info))
{
int major,minor,patch;
if (sscanf(buf, "%d.%d.%d", &major, &minor, &patch) == 3)
diff --git a/sql/wsrep_check_opts.cc b/sql/wsrep_check_opts.cc
index b17a5f164a4..f35bda38848 100644
--- a/sql/wsrep_check_opts.cc
+++ b/sql/wsrep_check_opts.cc
@@ -85,7 +85,7 @@ int wsrep_check_opts()
}
}
- if (strcasecmp(wsrep_provider, "NONE"))
+ if (strcasecmp(wsrep_provider, WSREP_NONE))
{
if (global_system_variables.binlog_format != BINLOG_FORMAT_ROW)
{
diff --git a/sql/wsrep_event_service.cc b/sql/wsrep_event_service.cc
new file mode 100644
index 00000000000..a4fea6d418d
--- /dev/null
+++ b/sql/wsrep_event_service.cc
@@ -0,0 +1,23 @@
+/* Copyright 2021-2022 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "wsrep_event_service.h"
+
+wsrep::event_service*
+Wsrep_event_service::instance()
+{
+ static Wsrep_event_service instance;
+ return &instance;
+}
diff --git a/sql/wsrep_event_service.h b/sql/wsrep_event_service.h
new file mode 100644
index 00000000000..e28ffeec723
--- /dev/null
+++ b/sql/wsrep_event_service.h
@@ -0,0 +1,49 @@
+/* Copyright 2021-2022 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_EVENT_SERVICE_H
+#define WSREP_EVENT_SERVICE_H
+
+/* wsrep-lib */
+#include "wsrep/event_service.hpp"
+
+/* implementation */
+#include "wsrep_status.h"
+
+class Wsrep_event_service : public wsrep::event_service
+{
+public:
+
+ void process_event(const std::string& name, const std::string& value)
+ override
+ {
+ if (name == "progress")
+ {
+ Wsrep_status::report_progress(value);
+ }
+ else if (name == "event")
+ {
+ Wsrep_status::report_event(value);
+ }
+ else
+ {
+ // not interested in the event
+ }
+ }
+
+ static wsrep::event_service* instance();
+};
+
+#endif /* WSREP_EVENT_SERVICE_H */
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 9c11c802cc8..3a8bfe3532d 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -56,7 +56,6 @@
#include <sstream>
/* wsrep-lib */
-Wsrep_server_state* Wsrep_server_state::m_instance;
my_bool wsrep_emulate_bin_log= FALSE; // activating parts of binlog interface
my_bool wsrep_preordered_opt= FALSE;
@@ -868,12 +867,13 @@ int wsrep_init()
wsrep_init_position();
wsrep_sst_auth_init();
- if (strlen(wsrep_provider)== 0 ||
- !strcmp(wsrep_provider, WSREP_NONE))
+ if (!*wsrep_provider ||
+ !strcasecmp(wsrep_provider, WSREP_NONE))
{
// enable normal operation in case no provider is specified
global_system_variables.wsrep_on= 0;
- int err= Wsrep_server_state::instance().load_provider(wsrep_provider, wsrep_provider_options ? wsrep_provider_options : "");
+ int err= Wsrep_server_state::init_provider(
+ wsrep_provider, wsrep_provider_options ? wsrep_provider_options : "");
if (err)
{
DBUG_PRINT("wsrep",("wsrep::init() failed: %d", err));
@@ -915,14 +915,14 @@ int wsrep_init()
"wsrep_trx_fragment_size to 0 or use wsrep_provider that "
"supports streaming replication.",
wsrep_provider, global_system_variables.wsrep_trx_fragment_size);
- Wsrep_server_state::instance().unload_provider();
+ Wsrep_server_state::instance().deinit_provider();
Wsrep_server_state::deinit_provider_services();
return 1;
}
/* Now WSREP is fully initialized */
global_system_variables.wsrep_on= 1;
- WSREP_ON_= wsrep_provider && strcmp(wsrep_provider, WSREP_NONE);
+ WSREP_ON_= wsrep_provider && *wsrep_provider && strcasecmp(wsrep_provider, WSREP_NONE);
wsrep_service_started= 1;
wsrep_init_provider_status_variables();
@@ -931,7 +931,6 @@ int wsrep_init()
WSREP_DEBUG("SR storage init for: %s",
(wsrep_SR_store_type == WSREP_SR_STORE_TABLE) ? "table" : "void");
-
return 0;
}
@@ -992,7 +991,8 @@ void wsrep_init_startup (bool sst_first)
wsrep_plugins_pre_init();
/* Skip replication start if dummy wsrep provider is loaded */
- if (!strcmp(wsrep_provider, WSREP_NONE)) return;
+ if (!wsrep_provider || !*wsrep_provider ||
+ !strcasecmp(wsrep_provider, WSREP_NONE)) return;
/* Skip replication start if no cluster address */
if (!wsrep_cluster_address_exists()) return;
@@ -1006,6 +1006,11 @@ void wsrep_init_startup (bool sst_first)
wsrep_create_rollbacker();
wsrep_create_appliers(1);
+ if (Wsrep_server_state::init_options())
+ {
+ WSREP_WARN("Failed to initialize provider options");
+ }
+
Wsrep_server_state& server_state= Wsrep_server_state::instance();
/*
If the SST happens before server initialization, wait until the server
@@ -1037,7 +1042,7 @@ void wsrep_deinit(bool free_options)
DBUG_ASSERT(wsrep_inited == 1);
WSREP_DEBUG("wsrep_deinit");
- Wsrep_server_state::instance().unload_provider();
+ Wsrep_server_state::deinit_provider();
Wsrep_server_state::deinit_provider_services();
provider_name[0]= '\0';
@@ -3429,7 +3434,7 @@ ignore_error:
WSREP_WARN("Ignoring error '%s' on query. "
"Default database: '%s'. Query: '%s', Error_code: %d",
thd->get_stmt_da()->message(),
- print_slave_db_safe(thd->db.str),
+ safe_str(thd->db.str),
thd->query(),
error);
return 1;
diff --git a/sql/wsrep_plugin.cc b/sql/wsrep_plugin.cc
index d23c51b19e1..f062f005ad5 100644
--- a/sql/wsrep_plugin.cc
+++ b/sql/wsrep_plugin.cc
@@ -1,4 +1,4 @@
-/* Copyright 2016 Codership Oy <http://www.codership.com>
+/* Copyright 2016-2021 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,11 +13,329 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
-#include "wsrep_trans_observer.h"
-#include "wsrep_mysqld.h"
+/*
+ Wsrep plugin comes in two parts, wsrep_plugin and wsrep_provider_plugin.
+
+  If plugin-wsrep-provider=ON, the wsrep_provider_options variable is
+  disabled in favor of individual option variables that are initialized
+  from the provider.
+*/
+
+#include "sql_plugin.h"
+#include "sql_priv.h"
+#include "sql_class.h"
+#include "set_var.h"
+#include "my_global.h"
+#include "mysqld_error.h"
#include <mysql/plugin.h>
+#include "wsrep_mysqld.h"
+#include "wsrep/provider_options.hpp"
+#include "wsrep_server_state.h"
+#include "wsrep_var.h" // wsrep_refresh_provider_options()
+
+#ifdef WITH_WSREP
+static bool provider_plugin_enabled= false;
+
+/* Prototype for provider system variables */
+static char *dummy_str= 0;
+__attribute__((unused))
+static MYSQL_SYSVAR_STR(proto_string, dummy_str, 0, 0, 0, 0, "");
+
+
+bool wsrep_provider_plugin_enabled()
+{
+ return provider_plugin_enabled;
+}
+
+/* Returns the name of the variable without prefix */
+static const char *sysvar_name(struct st_mysql_sys_var *var)
+{
+ const char *var_name= ((decltype(mysql_sysvar_proto_string) *) var)->name;
+ long unsigned int prefix_len= sizeof("wsrep_provider_") - 1;
+ return &var_name[prefix_len];
+}
+
+/* Returns option corresponding to the given sysvar */
+static const wsrep::provider_options::option *
+sysvar_to_option(struct st_mysql_sys_var *var)
+{
+ auto options= Wsrep_server_state::get_options();
+ if (!options)
+ {
+ return nullptr;
+ }
+ return options->get_option(sysvar_name(var));
+}
+
+/* Make a boolean option value */
+static std::unique_ptr<wsrep::provider_options::option_value>
+make_option_value(my_bool value)
+{
+ return std::unique_ptr<wsrep::provider_options::option_value>(
+ new wsrep::provider_options::option_value_bool(value));
+}
+
+/* Make a string option value */
+static std::unique_ptr<wsrep::provider_options::option_value>
+make_option_value(const char *value)
+{
+ return std::unique_ptr<wsrep::provider_options::option_value>(
+ new wsrep::provider_options::option_value_string(value));
+}
+
+/* Make an integer option value */
+static std::unique_ptr<wsrep::provider_options::option_value>
+make_option_value(long long value)
+{
+ return std::unique_ptr<wsrep::provider_options::option_value>(
+ new wsrep::provider_options::option_value_int(value));
+}
+
+/* Make a double option value */
+static std::unique_ptr<wsrep::provider_options::option_value>
+make_option_value(double value)
+{
+ return std::unique_ptr<wsrep::provider_options::option_value>(
+ new wsrep::provider_options::option_value_double(value));
+}
+
+/* Helper to get the actual value out of option_value */
+template <class T>
+static T get_option_value(wsrep::provider_options::option_value *value)
+{
+ return *((T *) value->get_ptr());
+}
+
+/* Same as above, specialized for strings */
+template <>
+char *get_option_value(wsrep::provider_options::option_value *value)
+{
+ return (char *) value->get_ptr();
+}
+
+/* Update function for sysvars */
+template <class T>
+static void wsrep_provider_sysvar_update(THD *thd,
+ struct st_mysql_sys_var *var,
+ void *var_ptr, const void *save)
+{
+ auto opt= sysvar_to_option(var);
+ if (!opt)
+ {
+ WSREP_ERROR("Could not match var to option");
+ my_error(ER_UNKNOWN_ERROR, MYF(0));
+ return;
+ }
+
+ T new_value= *((T *) save);
+
+ auto options= Wsrep_server_state::get_options();
+ if (options->set(opt->name(), std::move(make_option_value(new_value))))
+ {
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), opt->name(),
+ make_option_value(new_value)->as_string());
+ return;
+ }
+
+ *((T *) var_ptr)= get_option_value<T>(opt->value());
+
+ wsrep_refresh_provider_options();
+}
+
+/* Convert option flags to corresponding sysvar flags */
+static int map_option_flags_to_sysvar(wsrep::provider_options::option *opt)
+{
+ int flags= 0;
+ if (opt->flags() & wsrep::provider_options::flag::readonly)
+ flags|= PLUGIN_VAR_READONLY;
+ if (opt->flags() & wsrep::provider_options::flag::deprecated)
+ flags|= PLUGIN_VAR_DEPRECATED;
+ return flags;
+}
+
+/* Helper to construct a sysvar of type string for the given option */
+static struct st_mysql_sys_var *
+make_sysvar_for_string_option(wsrep::provider_options::option *opt)
+{
+ char *dummy= 0;
+ MYSQL_SYSVAR_STR(proto_string,
+ dummy,
+ map_option_flags_to_sysvar(opt),
+ "Wsrep provider option",
+ 0,
+ wsrep_provider_sysvar_update<char *>,
+ get_option_value<char *>(opt->default_value()));
+ mysql_sysvar_proto_string.name= opt->name();
+ char **val= (char **) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(char *), MYF(0));
+ *val= get_option_value<char *>(opt->value());
+ mysql_sysvar_proto_string.value= val;
+ struct st_mysql_sys_var *var= (struct st_mysql_sys_var *) my_malloc(
+ PSI_NOT_INSTRUMENTED, sizeof(mysql_sysvar_proto_string), MYF(0));
+ memcpy(var, &mysql_sysvar_proto_string, sizeof(mysql_sysvar_proto_string));
+ return var;
+}
+
+/* Helper to construct a sysvar of type boolean for the given option */
+static struct st_mysql_sys_var *
+make_sysvar_for_bool_option(wsrep::provider_options::option *opt)
+{
+ my_bool dummy= 0;
+ MYSQL_SYSVAR_BOOL(proto_bool,
+ dummy,
+ map_option_flags_to_sysvar(opt),
+ "Wsrep provider option",
+ 0,
+ wsrep_provider_sysvar_update<my_bool>,
+ get_option_value<my_bool>(opt->default_value()));
+ mysql_sysvar_proto_bool.name= opt->name();
+ char *val= (char *) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(char), MYF(0));
+ *val= get_option_value<bool>(opt->value());
+ mysql_sysvar_proto_bool.value= val;
+ struct st_mysql_sys_var *var= (struct st_mysql_sys_var *) my_malloc(
+ PSI_NOT_INSTRUMENTED, sizeof(mysql_sysvar_proto_bool), MYF(0));
+ memcpy(var, &mysql_sysvar_proto_bool, sizeof(mysql_sysvar_proto_bool));
+ return var;
+}
+
+/* Helper to construct an integer sysvar for the given option */
+static struct st_mysql_sys_var *
+make_sysvar_for_integer_option(wsrep::provider_options::option *opt)
+{
+ long long dummy= 0;
+ MYSQL_SYSVAR_LONGLONG(proto_longlong,
+ dummy,
+ map_option_flags_to_sysvar(opt),
+ "Wsrep provider option",
+ 0,
+ wsrep_provider_sysvar_update<long long>,
+ get_option_value<long long>(opt->default_value()),
+ std::numeric_limits<long long>::min(),
+ std::numeric_limits<long long>::max(),
+ 0);
+ mysql_sysvar_proto_longlong.name= opt->name();
+ long long *val= (long long *) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(long long), MYF(0));
+ *val= get_option_value<long long>(opt->value());
+ mysql_sysvar_proto_longlong.value= val;
+ struct st_mysql_sys_var *var= (struct st_mysql_sys_var *) my_malloc(
+ PSI_NOT_INSTRUMENTED, sizeof(mysql_sysvar_proto_longlong), MYF(0));
+ memcpy(var, &mysql_sysvar_proto_longlong, sizeof(mysql_sysvar_proto_longlong));
+ return var;
+}
+
+/* Helper to construct a sysvar of type double for the given option */
+static struct st_mysql_sys_var *
+make_sysvar_for_double_option(wsrep::provider_options::option *opt)
+{
+ double dummy= 0;
+ MYSQL_SYSVAR_DOUBLE(proto_double,
+ dummy,
+ map_option_flags_to_sysvar(opt),
+ "Wsrep provider option",
+ 0,
+ wsrep_provider_sysvar_update<double>,
+ get_option_value<double>(opt->default_value()),
+ std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ 0);
+ mysql_sysvar_proto_double.name= opt->name();
+ double *val= (double *) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(double), MYF(0));
+ *val= get_option_value<double>(opt->value());
+ mysql_sysvar_proto_double.value= val;
+ struct st_mysql_sys_var *var= (struct st_mysql_sys_var *) my_malloc(
+ PSI_NOT_INSTRUMENTED, sizeof(mysql_sysvar_proto_double), MYF(0));
+ memcpy(var, &mysql_sysvar_proto_double, sizeof(mysql_sysvar_proto_double));
+ return var;
+}
+
+/* Construct a sysvar corresponding to the given provider option */
+struct st_mysql_sys_var *
+wsrep_make_sysvar_for_option(wsrep::provider_options::option *opt)
+{
+ const int type_flag= opt->flags() & wsrep::provider_options::flag_type_mask;
+ switch (type_flag)
+ {
+ case wsrep::provider_options::flag::type_bool:
+ return make_sysvar_for_bool_option(opt);
+ case wsrep::provider_options::flag::type_integer:
+ return make_sysvar_for_integer_option(opt);
+ case wsrep::provider_options::flag::type_double:
+ return make_sysvar_for_double_option(opt);
+ default:
+ assert(type_flag == 0);
+ return make_sysvar_for_string_option(opt);
+ };
+}
+
+/* Free a sysvar */
+void wsrep_destroy_sysvar(struct st_mysql_sys_var *var)
+{
+ char **var_value= ((decltype(mysql_sysvar_proto_string) *) var)->value;
+ my_free(var_value);
+ my_free(var);
+}
+
+static int wsrep_provider_plugin_init(void *p)
+{
+ WSREP_DEBUG("wsrep_provider_plugin_init()");
+
+ if (!WSREP_ON)
+ {
+ sql_print_information("Plugin '%s' is disabled.", "wsrep-provider");
+ return 0;
+ }
+
+ provider_plugin_enabled= true;
+
+ // When plugin-wsrep-provider is enabled we set
+ // wsrep_provider_options parameter as READ_ONLY
+ sys_var *my_var= find_sys_var(current_thd, "wsrep_provider_options");
+ int flags= my_var->get_flags();
+ my_var->update_flags(flags |= (int)sys_var::READONLY);
+ return 0;
+}
+
+static int wsrep_provider_plugin_deinit(void *p)
+{
+ WSREP_DEBUG("wsrep_provider_plugin_deinit()");
+ sys_var *my_var= find_sys_var(current_thd, "wsrep_provider_options");
+ int flags= my_var->get_flags();
+ my_var->update_flags(flags &= (int)~sys_var::READONLY);
+ return 0;
+}
+
+struct Mysql_replication wsrep_provider_plugin = {
+ MYSQL_REPLICATION_INTERFACE_VERSION
+};
+
+maria_declare_plugin(wsrep_provider)
+{
+ MYSQL_REPLICATION_PLUGIN,
+ &wsrep_provider_plugin,
+ "wsrep_provider",
+ "Codership Oy",
+ "Wsrep provider plugin",
+ PLUGIN_LICENSE_GPL,
+ wsrep_provider_plugin_init,
+ wsrep_provider_plugin_deinit,
+ 0x0100,
+ NULL, /* Status variables */
+ /* System variables, this will be assigned by wsrep plugin below. */
+ NULL,
+ "1.0", /* Version (string) */
+ MariaDB_PLUGIN_MATURITY_ALPHA /* Maturity */
+}
+maria_declare_plugin_end;
+
+void wsrep_provider_plugin_set_sysvars(st_mysql_sys_var** vars)
+{
+ builtin_maria_wsrep_provider_plugin->system_vars= vars;
+}
+
+/*
+ Wsrep plugin
+*/
+
static int wsrep_plugin_init(void *p)
{
WSREP_DEBUG("wsrep_plugin_init()");
@@ -51,3 +369,5 @@ maria_declare_plugin(wsrep)
MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */
}
maria_declare_plugin_end;
+
+#endif /* WITH_WSREP */
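
The sysvar glue above repeats one pattern per type: an overloaded factory that wraps a raw value into an option value object, plus a single template update function that casts the saved value, builds the option value and hands it on. A simplified standalone illustration; the names here are invented and this is neither the wsrep-lib API nor the plugin sysvar interface.

#include <cstdio>
#include <memory>
#include <string>

struct OptionValue
{
  virtual ~OptionValue() {}
  virtual std::string as_string() const= 0;
};
struct BoolValue : OptionValue
{
  bool v; explicit BoolValue(bool b) : v(b) {}
  std::string as_string() const override { return v ? "yes" : "no"; }
};
struct IntValue : OptionValue
{
  long long v; explicit IntValue(long long i) : v(i) {}
  std::string as_string() const override { return std::to_string(v); }
};

// One factory overload per value type.
static std::unique_ptr<OptionValue> make_value(bool v)
{ return std::unique_ptr<OptionValue>(new BoolValue(v)); }
static std::unique_ptr<OptionValue> make_value(long long v)
{ return std::unique_ptr<OptionValue>(new IntValue(v)); }

// One template update function shared by every option type.
template <class T>
static void update_option(const char *name, const void *save)
{
  T new_value= *(const T*) save;               // same cast the real code does
  std::unique_ptr<OptionValue> value= make_value(new_value);
  printf("provider option %s set to %s\n", name, value->as_string().c_str());
}

int main()
{
  long long int_opt= 4;
  bool bool_opt= true;
  update_option<long long>("some.integer.option", &int_opt);
  update_option<bool>("some.bool.option", &bool_opt);
  return 0;
}
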
diff --git a/sql/wsrep_plugin.h b/sql/wsrep_plugin.h
new file mode 100644
index 00000000000..531d1d26a48
--- /dev/null
+++ b/sql/wsrep_plugin.h
@@ -0,0 +1,36 @@
+/* Copyright 2022 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_PLUGIN_H
+#define WSREP_PLUGIN_H
+
+class option;
+struct st_mysql_sys_var;
+
+/* Returns true if provider plugin was initialized and is active */
+bool wsrep_provider_plugin_enabled();
+
+/* Set the given sysvars array for provider plugin.
+ Must be called before the plugin is initialized. */
+void wsrep_provider_plugin_set_sysvars(st_mysql_sys_var **);
+
+/* Construct a sysvar corresponding to the given provider option */
+struct st_mysql_sys_var *
+wsrep_make_sysvar_for_option(wsrep::provider_options::option *);
+
+/* Destroy a sysvar created by make_sysvar_for_option */
+void wsrep_destroy_sysvar(struct st_mysql_sys_var *);
+
+#endif /* WSREP_PLUGIN_H */
diff --git a/sql/wsrep_server_state.cc b/sql/wsrep_server_state.cc
index e173042362f..f80320fe216 100644
--- a/sql/wsrep_server_state.cc
+++ b/sql/wsrep_server_state.cc
@@ -1,4 +1,4 @@
-/* Copyright 2018 Codership Oy <info@codership.com>
+/* Copyright 2018-2022 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,7 +17,9 @@
#include "wsrep_api.h"
#include "wsrep_server_state.h"
#include "wsrep_allowlist_service.h"
+#include "wsrep_event_service.h"
#include "wsrep_binlog.h" /* init/deinit group commit */
+#include "wsrep_plugin.h" /* make/destroy sysvar helpers */
mysql_mutex_t LOCK_wsrep_server_state;
mysql_cond_t COND_wsrep_server_state;
@@ -29,6 +31,10 @@ PSI_cond_key key_COND_wsrep_server_state;
wsrep::provider::services Wsrep_server_state::m_provider_services;
+Wsrep_server_state* Wsrep_server_state::m_instance;
+std::unique_ptr<wsrep::provider_options> Wsrep_server_state::m_options;
+std::vector<st_mysql_sys_var*> Wsrep_server_state::m_sysvars;
+
Wsrep_server_state::Wsrep_server_state(const std::string& name,
const std::string& incoming_address,
const std::string& address,
@@ -74,6 +80,48 @@ void Wsrep_server_state::init_once(const std::string& name,
}
}
+int Wsrep_server_state::init_provider(const std::string& provider,
+ const std::string& options)
+{
+ DBUG_ASSERT(m_instance);
+ int ret= m_instance->load_provider(provider, options);
+ if (ret)
+ {
+ WSREP_ERROR("Failed to load provider %s with options %s",
+ provider.c_str(), options.c_str());
+ return ret;
+ }
+ return 0;
+}
+
+int Wsrep_server_state::init_options()
+{
+ if (!m_instance) return 1;
+ m_options= std::unique_ptr<wsrep::provider_options>(
+ new wsrep::provider_options(m_instance->provider()));
+ int ret= m_options->initial_options();
+ if (ret)
+ {
+ WSREP_ERROR("Failed to initialize provider options");
+ m_options = nullptr;
+ m_instance->unload_provider();
+ return ret;
+ }
+ m_options->for_each([](wsrep::provider_options::option *opt) {
+ struct st_mysql_sys_var *var= wsrep_make_sysvar_for_option(opt);
+ m_sysvars.push_back(var);
+ });
+ m_sysvars.push_back(nullptr);
+ wsrep_provider_plugin_set_sysvars(&m_sysvars[0]);
+ return 0;
+}
+
+void Wsrep_server_state::deinit_provider()
+{
+ m_options = nullptr;
+ m_instance->unload_provider();
+}
+
void Wsrep_server_state::destroy()
{
if (m_instance)
@@ -82,12 +130,21 @@ void Wsrep_server_state::destroy()
m_instance= 0;
mysql_mutex_destroy(&LOCK_wsrep_server_state);
mysql_cond_destroy(&COND_wsrep_server_state);
+ for (auto var : m_sysvars)
+ {
+ if (var)
+ {
+ wsrep_destroy_sysvar(var);
+ }
+ }
+ m_sysvars.clear();
}
}
void Wsrep_server_state::init_provider_services()
{
m_provider_services.allowlist_service= wsrep_allowlist_service_init();
+ m_provider_services.event_service= Wsrep_event_service::instance();
}
void Wsrep_server_state::deinit_provider_services()
diff --git a/sql/wsrep_server_state.h b/sql/wsrep_server_state.h
index 8759f7a9d84..d169e5b219d 100644
--- a/sql/wsrep_server_state.h
+++ b/sql/wsrep_server_state.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 Codership Oy <info@codership.com>
+/* Copyright 2018-2021 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
/* wsrep-lib */
#include "wsrep/server_state.hpp"
#include "wsrep/provider.hpp"
+#include "wsrep/provider_options.hpp"
/* implementation */
#include "wsrep_server_service.h"
@@ -34,6 +35,10 @@ public:
const std::string& working_dir,
const wsrep::gtid& initial_position,
int max_protocol_version);
+ static int init_provider(const std::string& provider,
+ const std::string& options);
+ static int init_options();
+ static void deinit_provider();
static void destroy();
static Wsrep_server_state& instance()
@@ -51,6 +56,11 @@ public:
return instance().provider();
}
+ static wsrep::provider_options* get_options()
+ {
+ return m_options.get();
+ }
+
static bool has_capability(int capability)
{
return (get_provider().capabilities() & capability);
@@ -77,7 +87,11 @@ private:
Wsrep_server_service m_service;
static wsrep::provider::services m_provider_services;
static Wsrep_server_state* m_instance;
-
+ static std::unique_ptr<wsrep::provider_options> m_options;
+ // Sysvars for provider plugin. We keep these here because
+ // they are allocated dynamically and must be freed at some
+ // point during shutdown (after the plugin is deinitialized).
+ static std::vector<st_mysql_sys_var *> m_sysvars;
};
#endif // WSREP_SERVER_STATE_H
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index 118bedecff3..b007357dbc3 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -310,7 +310,8 @@ bool wsrep_sst_donor_update (sys_var *self, THD* thd, enum_var_type type)
bool wsrep_before_SE()
{
- return (wsrep_provider != NULL
+ return (wsrep_provider
+ && *wsrep_provider
&& strcmp (wsrep_provider, WSREP_NONE)
&& strcmp (wsrep_sst_method, WSREP_SST_SKIP)
&& strcmp (wsrep_sst_method, WSREP_SST_MYSQLDUMP));
diff --git a/sql/wsrep_status.h b/sql/wsrep_status.h
index dd83dda2857..8d9291abeb5 100644
--- a/sql/wsrep_status.h
+++ b/sql/wsrep_status.h
@@ -42,6 +42,13 @@ public:
Wsrep_status::m_instance->report_progress(progress);
}
+ static void report_event(const std::string& event)
+ {
+ if (!Wsrep_status::m_instance) return;
+
+ Wsrep_status::m_instance->report_event(event);
+ }
+
static void report_log_msg(wsrep::reporter::log_level level,
const char* tag, size_t tag_len,
const char* buf, size_t buf_len,
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 0de1b034953..3343fcd2445 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -27,6 +27,7 @@
#include <cstdlib>
#include "wsrep_trans_observer.h"
#include "wsrep_server_state.h"
+#include "wsrep_plugin.h" /* wsrep_provider_plugin_is_enabled() */
ulong wsrep_reject_queries;
@@ -93,12 +94,17 @@ static bool refresh_provider_options()
}
}
+bool wsrep_refresh_provider_options()
+{
+ return refresh_provider_options();
+}
+
void wsrep_set_wsrep_on(THD* thd)
{
if (thd)
thd->wsrep_was_on= WSREP_ON_;
- WSREP_PROVIDER_EXISTS_= wsrep_provider &&
- strncasecmp(wsrep_provider, WSREP_NONE, FN_REFLEN);
+ WSREP_PROVIDER_EXISTS_= wsrep_provider && *wsrep_provider &&
+ strcasecmp(wsrep_provider, WSREP_NONE);
WSREP_ON_= global_system_variables.wsrep_on && WSREP_PROVIDER_EXISTS_;
}
@@ -108,10 +114,14 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
{
my_bool saved_wsrep_on= global_system_variables.wsrep_on;
- thd->variables.wsrep_on= global_system_variables.wsrep_on;
+ thd->variables.wsrep_on= saved_wsrep_on;
// If wsrep has not been inited we need to do it now
- if (global_system_variables.wsrep_on && wsrep_provider && !wsrep_inited)
+ if (!wsrep_inited &&
+ saved_wsrep_on &&
+ wsrep_provider &&
+ *wsrep_provider &&
+ strcasecmp(wsrep_provider, WSREP_NONE))
{
// wsrep_init() rewrites provide if it fails
char* tmp= strdup(wsrep_provider);
@@ -447,6 +457,12 @@ static int wsrep_provider_verify (const char* provider_str)
bool wsrep_provider_check (sys_var *self, THD* thd, set_var* var)
{
+ if (wsrep_provider_plugin_enabled())
+ {
+ my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), var->var->name.str, "read only");
+ return true;
+ }
+
char wsrep_provider_buf[FN_REFLEN];
if ((! var->save_result.string_value.str) ||
@@ -538,6 +554,11 @@ bool wsrep_provider_options_check(sys_var *self, THD* thd, set_var* var)
my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) not started", MYF(0));
return true;
}
+ if (wsrep_provider_plugin_enabled())
+ {
+ my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), var->var->name.str, "read only");
+ return true;
+ }
return false;
}
diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h
index 0f811d70928..3bebae7fb79 100644
--- a/sql/wsrep_var.h
+++ b/sql/wsrep_var.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2021 Codership Oy <info@codership.com>
+/* Copyright (C) 2013-2022 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -37,6 +37,7 @@ class THD;
int wsrep_init_vars();
void wsrep_set_wsrep_on(THD *thd);
+bool wsrep_refresh_provider_options();
#define CHECK_ARGS (sys_var *self, THD* thd, set_var *var)
#define UPDATE_ARGS (sys_var *self, THD* thd, enum_var_type type)
diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c
index 0e02127ea32..85637a04391 100644
--- a/storage/archive/archive_reader.c
+++ b/storage/archive/archive_reader.c
@@ -26,7 +26,7 @@
#define BUFFER_LEN 1024
#define ARCHIVE_ROW_HEADER_SIZE 4
-#define SHOW_VERSION "0.1"
+#define VER "0.1"
static void get_options(int *argc,char * * *argv);
static void print_version(void);
@@ -400,12 +400,6 @@ static void usage(void)
my_print_help(my_long_options);
}
-static void print_version(void)
-{
- printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, SHOW_VERSION,
- MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
-}
-
static void get_options(int *argc, char ***argv)
{
load_defaults_or_exit("my", load_default_groups, argc, argv);
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 19a0ffe028a..2a8deb431b1 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -132,7 +132,8 @@ extern "C" PSI_file_key arch_key_file_data;
static handler *archive_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
-int archive_discover(handlerton *hton, THD* thd, TABLE_SHARE *share);
+static int archive_discover(handlerton *hton, THD* thd, TABLE_SHARE *share);
+static void archive_update_optimizer_costs(OPTIMIZER_COSTS *costs);
/*
Number of rows that will force a bulk insert.
@@ -205,6 +206,7 @@ static const char *ha_archive_exts[] = {
NullS
};
+
int archive_db_init(void *p)
{
DBUG_ENTER("archive_db_init");
@@ -217,10 +219,10 @@ int archive_db_init(void *p)
archive_hton= (handlerton *)p;
archive_hton->db_type= DB_TYPE_ARCHIVE_DB;
archive_hton->create= archive_create_handler;
- archive_hton->flags= HTON_NO_FLAGS;
archive_hton->discover_table= archive_discover;
archive_hton->tablefile_extensions= ha_archive_exts;
-
+ archive_hton->update_optimizer_costs= archive_update_optimizer_costs;
+ archive_hton->flags= HTON_NO_FLAGS;
DBUG_RETURN(0);
}
@@ -267,7 +269,7 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
archive_reader_open= FALSE;
}
-int archive_discover(handlerton *hton, THD* thd, TABLE_SHARE *share)
+static int archive_discover(handlerton *hton, THD* thd, TABLE_SHARE *share)
{
DBUG_ENTER("archive_discover");
DBUG_PRINT("archive_discover", ("db: '%s' name: '%s'", share->db.str,
@@ -1092,6 +1094,54 @@ int ha_archive::index_init(uint keynr, bool sorted)
DBUG_RETURN(0);
}
+#define ARCHIVE_DECOMPRESS_TIME 0.081034543792841 // See optimizer_costs.txt
+
+static void archive_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ costs->disk_read_ratio= 0.20; // Assume 80 % of data is cached by system
+ costs->row_lookup_cost= 0; // See rnd_pos_time
+ costs->key_lookup_cost= 0; // See key_read_time
+ costs->key_next_find_cost= 0; // Only unique indexes
+ costs->index_block_copy_cost= 0;
+}
+
+
+IO_AND_CPU_COST ha_archive::scan_time()
+{
+ IO_AND_CPU_COST cost;
+ ulonglong blocks;
+ DBUG_ENTER("ha_archive::scan_time");
+
+ blocks= stats.data_file_length / IO_SIZE;
+ cost.io= 0; // No cache
+ cost.cpu= (blocks * DISK_READ_COST * DISK_READ_RATIO +
+ blocks* ARCHIVE_DECOMPRESS_TIME);
+ DBUG_RETURN(cost);
+}
+
+
+IO_AND_CPU_COST ha_archive::keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+{
+ IO_AND_CPU_COST cost= scan_time();
+ /*
+    As this is a unique index, assume that we have to scan half the file for
+    each range to find the row.
+ */
+ cost.cpu= cost.cpu * ranges / 2;
+ return cost;
+}
+
+
+IO_AND_CPU_COST ha_archive::rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST cost;
+ /* We have to do one azseek() for each row */
+ cost.io= rows2double(rows);
+ cost.cpu= rows * (DISK_READ_COST * DISK_READ_RATIO + ARCHIVE_DECOMPRESS_TIME);
+ return cost;
+}
+
/*
No indexes, so if we get a request for an index search since we tell
@@ -1116,8 +1166,6 @@ int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
current_k_offset= mkey->key_part->offset;
current_key= key;
current_key_len= key_len;
-
-
DBUG_ENTER("ha_archive::index_read_idx");
rc= rnd_init(TRUE);
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 2e03ac639b5..00d8a56acba 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -109,6 +109,10 @@ public:
uint max_supported_key_length() const { return sizeof(ulonglong); }
uint max_supported_key_part_length() const { return sizeof(ulonglong); }
ha_rows records() { return share->rows_recorded; }
+ IO_AND_CPU_COST scan_time() override;
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks) override;
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) override;
int index_init(uint keynr, bool sorted);
virtual int index_read(uchar * buf, const uchar * key,
uint key_len, enum ha_rkey_function find_flag);
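
For readers following the new cost model: the sketch below reproduces the arithmetic of ha_archive::scan_time() above as a stand-alone program. The struct and all numeric inputs are local placeholders (the real IO_SIZE, DISK_READ_COST and disk_read_ratio values live in the server and in Docs/optimizer_costs.txt); only the shape of the calculation is taken from the patch.

#include <cstdio>

struct io_and_cpu_cost { double io; double cpu; };

static double archive_scan_cost(double data_file_length, double io_size,
                                double disk_read_cost, double disk_read_ratio,
                                double decompress_time)
{
  double blocks= data_file_length / io_size;
  io_and_cpu_cost c;
  c.io= 0;                                     /* nothing is cached           */
  c.cpu= blocks * disk_read_cost * disk_read_ratio +
         blocks * decompress_time;             /* read + decompress per block */
  return c.io + c.cpu;                         /* illustrative combination    */
}

int main()
{
  /* 1 GB of compressed data; all constants below are placeholder values */
  printf("%g\n", archive_scan_cost(1e9, 4096, 1e-2, 0.20, 0.081034543792841));
  return 0;
}
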
diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc
index 0134032351e..343f3c70286 100644
--- a/storage/blackhole/ha_blackhole.cc
+++ b/storage/blackhole/ha_blackhole.cc
@@ -182,6 +182,17 @@ int ha_blackhole::info(uint flag)
DBUG_ENTER("ha_blackhole::info");
bzero((char*) &stats, sizeof(stats));
+ /*
+ The following is required to get replication to work as otherwise
+ test_quick_select() will think the table is empty and thus any
+ update/delete will not have any rows to update.
+ */
+ stats.records= 2;
+ /*
+ Block size should not be 0 as this will cause division by zero
+ in scan_time()
+ */
+ stats.block_size= 8192;
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= 1;
DBUG_RETURN(0);
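
The comment above about division by zero refers to the default scan_time() estimate dividing the data length by stats.block_size. A minimal stand-alone sketch of that assumed shape (not the server's exact code):

#include <cstdio>

static double scan_blocks(double data_file_length, double block_size)
{
  return data_file_length / block_size;   /* undefined if block_size == 0 */
}

int main()
{
  printf("%g blocks\n", scan_blocks(1 << 20, 8192));  /* 128 blocks */
  return 0;
}
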
diff --git a/storage/columnstore/CMakeLists.txt b/storage/columnstore/CMakeLists.txt
index ebb138c70f0..ab29ffc566b 100644
--- a/storage/columnstore/CMakeLists.txt
+++ b/storage/columnstore/CMakeLists.txt
@@ -28,10 +28,14 @@ CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
add_subdirectory(columnstore)
IF(TARGET columnstore)
+ # Redo logic in cmake/plugin to prevent the attempted creation of *Symlinks package
+ SET(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} columnstore-engineSymlinks)
+ SET(CPACK_COMPONENT_COLUMNSTORE-ENGINESYMLINKS_GROUP columnstore-engine PARENT_SCOPE)
+ SET(CPACK_COMPONENT_COLUMNSTORE-ENGINE_GROUP columnstore-engine PARENT_SCOPE)
# Needed to bump the component changes up to the main scope
APPEND_FOR_CPACK(CPACK_COMPONENTS_ALL)
IF (RPM)
- APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_REQUIRES " binutils net-tools python3")
+ APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_REQUIRES " binutils net-tools python3 MariaDB-client-compat MariaDB-server-compat")
APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_RECOMMENDS " jemalloc")
APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_USER_FILELIST ";%ignore /var/lib;%ignore /var")
APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_CONFLICTS " thrift MariaDB-columnstore-platform MariaDB-columnstore-libs")
diff --git a/storage/columnstore/columnstore b/storage/columnstore/columnstore
-Subproject fa286826cbeb654ec90b6a26f206dd75a5e8be9
+Subproject 58da5eea954dbbce8c954c323dd2c8247e54303
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 23d3c7c1058..502c1f4af6d 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1614,10 +1614,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Scale= 0;
pcf->Opt= (fop) ? (int)fop->opt : 0;
- if (fp->field_length >= 0)
- pcf->Length= fp->field_length;
- else
- pcf->Length= 256; // BLOB?
+ pcf->Length= fp->field_length;
pcf->Precision= pcf->Length;
@@ -7400,7 +7397,8 @@ int ha_connect::multi_range_read_next(range_id_t *range_info)
ha_rows ha_connect::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost)
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost)
{
/*
This call is here because there is no location where this->table would
@@ -7414,7 +7412,7 @@ ha_rows ha_connect::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
*flags|= HA_MRR_USE_DEFAULT_IMPL;
ha_rows rows= ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges,
- bufsz, flags, cost);
+ bufsz, flags, limit, cost);
xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL);
return rows;
} // end of multi_range_read_info_const
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 71ceb7974ba..c83584a62e4 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -308,13 +308,18 @@ public:
/** @brief
Called in test_quick_select to determine if indexes should be used.
*/
- virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
+ virtual IO_AND_CPU_COST scan_time()
+ { return { 0, (double) (stats.records+stats.deleted) * DISK_READ_COST }; };
/** @brief
This method will never be called if you do not implement indexes.
*/
- virtual double read_time(uint, uint, ha_rows rows)
- { return (double) rows / 20.0+1; }
+ virtual IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+ {
+ return { 0, (double) rows * 0.001 };
+ }
+
/*
Everything below are methods that we implement in ha_connect.cc.
@@ -497,7 +502,8 @@ int index_prev(uchar *buf);
ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost);
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost);
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
uint *flags, Cost_estimate *cost);
diff --git a/storage/connect/mysql-test/connect/r/index.result b/storage/connect/mysql-test/connect/r/index.result
index baebf1f1ebe..fdb44d06ee1 100644
--- a/storage/connect/mysql-test/connect/r/index.result
+++ b/storage/connect/mysql-test/connect/r/index.result
@@ -96,18 +96,25 @@ sexe genre
0 Inconnu
1 Masculin
2 Feminin
-SELECT nom, prenom, genre FROM t1 NATURAL JOIN t2 LIMIT 10;
+# t2 has only 3 rows. Force eq_ref by increasing table scan cost!
+set @@optimizer_scan_setup_cost=10000;
+explain SELECT nom, prenom, genre FROM t1 NATURAL JOIN t2 order by nom,prenom LIMIT 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4545 Using filesort
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.sexe 1 Using where
+SELECT nom, prenom, genre FROM t1 NATURAL JOIN t2 order by nom,prenom LIMIT 10;
nom prenom genre
-ESCOURCHE BENEDICTE Feminin
-VICENTE LAURENCE Feminin
-NICOLAS ROGER Masculin
-TESSEREAU MARIE HELENE Feminin
-MOGADOR ALAIN Masculin
-CHAUSSEE ERIC DENIS Masculin
-MAILLOT GEORGES Masculin
-CAMILLE NADINE Feminin
-BRUYERES JEAN MARC Masculin
-LONES GERARD Masculin
+ABBADIE MONIQUE Feminin
+ABBAYE ANNICK Feminin
+ABBAYE GERALD Masculin
+ABBE KATIA Feminin
+ABBE MICHELE Feminin
+ABBE SOPHIE Feminin
+ABBEVILLE PASCAL Masculin
+ABEBERRY PATRICK Masculin
+ABEILLES RENE Masculin
+ABEL JEAN PIERRE Masculin
+set @@optimizer_scan_setup_cost=default;
#
# Another table
#
diff --git a/storage/connect/mysql-test/connect/r/mysql_index.result b/storage/connect/mysql-test/connect/r/mysql_index.result
index 54acc7be08d..b6c34add632 100644
--- a/storage/connect/mysql-test/connect/r/mysql_index.result
+++ b/storage/connect/mysql-test/connect/r/mysql_index.result
@@ -7,7 +7,7 @@ msg char(100) DEFAULT NULL,
PRIMARY KEY (id)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,'Un'),(3,'Trois'),(5,'Cinq');
-INSERT INTO t1 VALUES(2,'Two'),(4,'Four'),(6,'Six');
+INSERT INTO t1 VALUES(2,'Two'),(4,'Four'),(6,'Six'), (7,'seven');
SELECT * FROM t1;
id msg
1 Un
@@ -16,6 +16,7 @@ id msg
2 Two
4 Four
6 Six
+7 seven
#
# Make local MYSQL table with indexed id column
#
@@ -35,6 +36,7 @@ id msg
2 Two
4 Four
6 Six
+7 seven
SELECT * FROM t2 WHERE id = 3;
id msg
3 Trois
@@ -49,12 +51,14 @@ SELECT * FROM t2 WHERE id > 4;
id msg
5 Cinq
6 Six
+7 seven
SELECT * FROM t2 WHERE id >= 3;
id msg
3 Trois
4 Four
5 Cinq
6 Six
+7 seven
SELECT * FROM t2 WHERE id < 3;
id msg
1 Un
@@ -64,6 +68,10 @@ id msg
1 Un
5 Cinq
6 Six
+7 seven
+explain SELECT * FROM t2 WHERE id <= 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 2 Using where
SELECT * FROM t2 WHERE id <= 3;
id msg
1 Un
@@ -87,6 +95,7 @@ id msg
4 Four
5 Cinq
6 Six
+7 seven
UPDATE t2 SET msg = 'Five' WHERE id = 5;
Warnings:
Note 1105 t1: 1 affected rows
@@ -98,6 +107,7 @@ id msg
2 Two
4 Four
6 Six
+7 seven
DELETE FROM t2 WHERE id = 4;
Warnings:
Note 1105 t1: 1 affected rows
@@ -108,6 +118,7 @@ id msg
5 Five
2 Two
6 Six
+7 seven
DROP TABLE t2;
DROP TABLE t1;
#
diff --git a/storage/connect/mysql-test/connect/t/index.test b/storage/connect/mysql-test/connect/t/index.test
index 47bfbae7680..546d5184e9f 100644
--- a/storage/connect/mysql-test/connect/t/index.test
+++ b/storage/connect/mysql-test/connect/t/index.test
@@ -57,8 +57,11 @@ create table t2
genre CHAR(8) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='sexe.csv' SEP_CHAR=';' ENDING=2;
SELECT * FROM t2;
-SELECT nom, prenom, genre FROM t1 NATURAL JOIN t2 LIMIT 10;
-
+--echo # t2 has only 3 rows. Force eq_ref by increasing table scan cost!
+set @@optimizer_scan_setup_cost=10000;
+explain SELECT nom, prenom, genre FROM t1 NATURAL JOIN t2 order by nom,prenom LIMIT 10;
+SELECT nom, prenom, genre FROM t1 NATURAL JOIN t2 order by nom,prenom LIMIT 10;
+set @@optimizer_scan_setup_cost=default;
--echo #
--echo # Another table
--echo #
diff --git a/storage/connect/mysql-test/connect/t/mysql_index.test b/storage/connect/mysql-test/connect/t/mysql_index.test
index cb4a332cdf8..a70ea3fd6f9 100644
--- a/storage/connect/mysql-test/connect/t/mysql_index.test
+++ b/storage/connect/mysql-test/connect/t/mysql_index.test
@@ -30,7 +30,7 @@ CREATE TABLE t1 (
PRIMARY KEY (id)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
INSERT INTO t1 VALUES(1,'Un'),(3,'Trois'),(5,'Cinq');
-INSERT INTO t1 VALUES(2,'Two'),(4,'Four'),(6,'Six');
+INSERT INTO t1 VALUES(2,'Two'),(4,'Four'),(6,'Six'), (7,'seven');
SELECT * FROM t1;
--echo #
@@ -54,6 +54,7 @@ SELECT * FROM t2 WHERE id > 4;
SELECT * FROM t2 WHERE id >= 3;
SELECT * FROM t2 WHERE id < 3;
SELECT * FROM t2 WHERE id < 2 OR id > 4;
+explain SELECT * FROM t2 WHERE id <= 3;
SELECT * FROM t2 WHERE id <= 3;
SELECT * FROM t2 WHERE id BETWEEN 3 AND 5;
SELECT * FROM t2 WHERE id > 2 AND id < 6;
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
index 96a9f70e4a3..6ece7115ea5 100644
--- a/storage/connect/tabext.cpp
+++ b/storage/connect/tabext.cpp
@@ -466,7 +466,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
if (Quote) {
// Tabname can have both database and table identifiers, we need to parse
- if (res= strstr(buf, "."))
+ if ((res= strstr(buf, ".")))
{
// Parse schema
my_len= res - buf + 1;
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index 043183444da..856bb789320 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -124,7 +124,12 @@ public:
/*
Called in test_quick_select to determine if indexes should be used.
*/
- virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
+ virtual IO_AND_CPU_COST scan_time()
+ {
+ return
+ { (double) ((share->saved_data_file_length + IO_SIZE-1))/ IO_SIZE,
+ (stats.records+stats.deleted) * ROW_NEXT_FIND_COST };
+ }
/* The next method will never be called */
virtual bool fast_key_read() { return 1;}
/*
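
A stand-alone illustration of the I/O term in the CSV scan_time() above: the file length is converted to IO_SIZE units, with IO_SIZE - 1 added so that a partial block still counts. IO_SIZE here is a placeholder value and the ROW_NEXT_FIND_COST term is omitted.

#include <cstdio>

static double csv_scan_io(double saved_data_file_length, double io_size)
{
  return (saved_data_file_length + io_size - 1) / io_size;
}

int main()
{
  printf("%g\n", csv_scan_io(10000, 4096));   /* prints about 3.44 */
  return 0;
}
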
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index 5d067f7cda9..78b07ed5d9f 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -148,15 +148,40 @@ public:
uint max_supported_key_length() const { return 0; }
/** @brief
- Called in test_quick_select to determine if indexes should be used.
+    Called in test_quick_select to determine the cost of a table scan
*/
- virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
+ virtual IO_AND_CPU_COST scan_time()
+ {
+ IO_AND_CPU_COST cost;
+ /* 0 blocks, 0.001 ms / row */
+ cost.io= (double) (stats.records+stats.deleted) * DISK_READ_COST;
+ cost.cpu= 0;
+ return cost;
+ }
/** @brief
This method will never be called if you do not implement indexes.
*/
- virtual double read_time(uint, uint, ha_rows rows)
- { return (double) rows / 20.0+1; }
+ virtual IO_AND_CPU_COST keyread_time(uint, ulong, ha_rows rows,
+ ulonglong blocks)
+ {
+ IO_AND_CPU_COST cost;
+ cost.io= blocks * DISK_READ_COST;
+ cost.cpu= (double) rows * 0.001;
+ return cost;
+ }
+
+ /** @brief
+ Cost of fetching 'rows' records through rnd_pos()
+ */
+ virtual IO_AND_CPU_COST rnd_pos_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost;
+ /* 0 blocks, 0.001 ms / row */
+ cost.io= 0;
+ cost.cpu= (double) rows * DISK_READ_COST;
+ return cost;
+ }
/*
Everything below are methods that we implement in ha_example.cc.
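
The example engine shows the general migration this patch performs: the old double-valued scan_time()/read_time() methods become IO_AND_CPU_COST-valued scan_time()/keyread_time()/rnd_pos_time(). Below is a stand-alone sketch of that shape change, with local stand-ins for the server's struct and constants.

#include <cstdio>

struct io_and_cpu_cost { double io; double cpu; };
static const double disk_read_cost= 1e-2;    /* placeholder value */

/* Old interface: one opaque double. */
static double old_scan_time(double records, double deleted)
{
  return (records + deleted) / 20.0 + 10;
}

/* New interface: I/O and CPU reported separately, as ha_example now does. */
static io_and_cpu_cost new_scan_time(double records, double deleted)
{
  io_and_cpu_cost cost;
  cost.io= (records + deleted) * disk_read_cost;
  cost.cpu= 0;
  return cost;
}

int main()
{
  printf("old=%g  new={io=%g, cpu=%g}\n",
         old_scan_time(1000, 0),
         new_scan_time(1000, 0).io, new_scan_time(1000, 0).cpu);
  return 0;
}
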
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index 25b12de3cd5..efb598bf91e 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -460,6 +460,20 @@ static void init_federated_psi_keys(void)
#endif /* HAVE_PSI_INTERFACE */
/*
+  Federated doesn't need costs.disk_read_ratio as everything is on a
+ remote server and nothing is cached locally
+*/
+
+static void federated_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ /*
+    Setting disk_read_ratio to 1.0 ensures we are using the costs
+ from rnd_pos_time() and scan_time()
+ */
+ costs->disk_read_ratio= 1.0;
+}
+
+/*
Initialize the federated handler.
SYNOPSIS
@@ -485,6 +499,7 @@ int federated_db_init(void *p)
federated_hton->rollback= federated_rollback;
federated_hton->create= federated_create_handler;
federated_hton->drop_table= [](handlerton *, const char*) { return -1; };
+ federated_hton->update_optimizer_costs= federated_update_optimizer_costs;
federated_hton->flags= HTON_ALTER_NOT_SUPPORTED | HTON_NO_PARTITION;
/*
@@ -909,7 +924,6 @@ ha_federated::ha_federated(handlerton *hton,
bzero(&bulk_insert, sizeof(bulk_insert));
}
-
/*
Convert MySQL result set row to handler internal format
@@ -2879,11 +2893,11 @@ int ha_federated::info(uint flag)
&error);
/*
- size of IO operations (This is based on a good guess, no high science
- involved)
+ Size of IO operations. This is used to calculate time to scan a table.
+ See handler.cc::keyread_time
*/
if (flag & HA_STATUS_CONST)
- stats.block_size= 4096;
+      stats.block_size= 1500;   // Typical size of a TCP packet
}
diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h
index fe729f08413..b5ee49755cb 100644
--- a/storage/federated/ha_federated.h
+++ b/storage/federated/ha_federated.h
@@ -180,23 +180,26 @@ public:
The reason for "records * 1000" is that such a large number forces
this to use indexes "
*/
- double scan_time()
+
+ IO_AND_CPU_COST scan_time()
{
DBUG_PRINT("info", ("records %lu", (ulong) stats.records));
- return (double)(stats.records*1000);
+ return
+ {
+ 0,
+ (double) (stats.mean_rec_length * stats.records)/8192 * DISK_READ_COST+
+ 1000,
+ };
}
- /*
- The next method will never be called if you do not implement indexes.
- */
- double read_time(uint index, uint ranges, ha_rows rows)
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
{
- /*
- Per Brian, this number is bugus, but this method must be implemented,
- and at a later date, he intends to document this issue for handler code
- */
- return (double) rows / 20.0+1;
+ return {0, (double) (ranges + rows) * DISK_READ_COST };
+ }
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows)
+ {
+ return {0, (double) rows * DISK_READ_COST };
}
-
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
/*
Everything below are methods that we implment in ha_federated.cc.
@@ -240,16 +243,11 @@ public:
void position(const uchar *record); //required
/*
A ref is a pointer inside a local buffer. It is not comparable to
- other ref's. This is never called as HA_NON_COMPARABLE_ROWID is set.
+ other ref's.
*/
int cmp_ref(const uchar *ref1, const uchar *ref2)
{
-#ifdef NOT_YET
- DBUG_ASSERT(0);
- return 0;
-#else
- return handler::cmp_ref(ref1,ref2); /* Works if table scan is used */
-#endif
+ return handler::cmp_ref(ref1,ref2); /* Works if table scan is used */
}
int info(uint); //required
int extra(ha_extra_function operation);
@@ -285,4 +283,3 @@ public:
int execute_simple_query(const char *query, int len);
int reset(void);
};
-
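
The new federated scan_time() above charges CPU proportional to the data volume in 8192-byte units, plus a constant 1000 that steers the optimizer towards indexes. A stand-alone sketch of that estimate with a placeholder DISK_READ_COST:

#include <cstdio>

static double federated_scan_cpu(double mean_rec_length, double records,
                                 double disk_read_cost)
{
  return (mean_rec_length * records) / 8192 * disk_read_cost + 1000;
}

int main()
{
  printf("%g\n", federated_scan_cpu(100, 100000, 1e-2));  /* about 1012.2 */
  return 0;
}
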
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 598886b8915..62a71aa6db6 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -412,6 +412,20 @@ static select_handler*
create_federatedx_select_handler(THD* thd, SELECT_LEX *sel);
/*
+  Federated doesn't need costs.disk_read_ratio as everything is on a remote
+ server and nothing is cached locally
+*/
+
+static void federatedx_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ /*
+    Setting disk_read_ratio to 0.0 ensures we are using the costs
+ from rnd_pos_time() and scan_time()
+ */
+ costs->disk_read_ratio= 0.0;
+}
+
+/*
Initialize the federatedx handler.
SYNOPSIS
@@ -443,6 +457,7 @@ int federatedx_db_init(void *p)
federatedx_hton->flags= HTON_ALTER_NOT_SUPPORTED;
federatedx_hton->create_derived= create_federatedx_derived_handler;
federatedx_hton->create_select= create_federatedx_select_handler;
+ federatedx_hton->update_optimizer_costs= federatedx_update_optimizer_costs;
if (mysql_mutex_init(fe_key_mutex_federatedx,
&federatedx_mutex, MY_MUTEX_INIT_FAST))
@@ -3098,11 +3113,11 @@ int ha_federatedx::info(uint flag)
if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
{
/*
- size of IO operations (This is based on a good guess, no high science
- involved)
+ Size of IO operations. This is used to calculate time to scan a table.
+ See handler.cc::keyread_time
*/
if (flag & HA_STATUS_CONST)
- stats.block_size= 4096;
+      stats.block_size= 1500;   // Typical size of a TCP packet
if ((*iop)->table_metadata(&stats, share->table_name,
(uint)share->table_name_length, flag))
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index 3573c658b11..a67fe1efa8f 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -222,7 +222,6 @@ public:
virtual int seek_position(FEDERATEDX_IO_RESULT **io_result,
const void *ref)=0;
virtual void set_thd(void *thd) { }
-
};
@@ -365,29 +364,31 @@ public:
Talk to Kostja about this - how to get the
number of rows * ...
disk scan time on other side (block size, size of the row) + network time ...
- The reason for "records * 1000" is that such a large number forces
- this to use indexes "
+    The reason for "1000" is that such a large number forces this to use indexes.
*/
- double scan_time()
+ IO_AND_CPU_COST scan_time()
{
DBUG_PRINT("info", ("records %lu", (ulong) stats.records));
- return (double)(stats.records*1000);
+ return
+ {
+ 0,
+ (double) (stats.mean_rec_length * stats.records)/8192 * DISK_READ_COST+
+ 1000,
+ };
}
- /*
- The next method will never be called if you do not implement indexes.
- */
- double read_time(uint index, uint ranges, ha_rows rows)
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+ {
+ return {0, (double) (ranges + rows) * DISK_READ_COST };
+ }
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows)
{
- /*
- Per Brian, this number is bugus, but this method must be implemented,
- and at a later date, he intends to document this issue for handler code
- */
- return (double) rows / 20.0+1;
+ return {0, (double) rows * DISK_READ_COST };
}
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
/*
- Everything below are methods that we implment in ha_federatedx.cc.
+ Everything below are methods that we implement in ha_federatedx.cc.
Most of these methods are not obligatory, skip them and
MySQL will treat them as not implemented
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 5f7f0c1efa0..cc7dc79e508 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -42,6 +42,28 @@ static int heap_drop_table(handlerton *hton, const char *path)
return error == ENOENT ? -1 : error;
}
+/* See optimizer_costs.txt for how the following values were calculated */
+#define HEAP_ROW_NEXT_FIND_COST 8.0166e-06 // For table scan
+#define BTREE_KEY_NEXT_FIND_COST 0.00007739 // For binary tree scan
+#define HEAP_LOOKUP_COST 0.00016097 // Heap lookup cost
+
+static void heap_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ /*
+ A lot of values are 0 as heap supports all needed xxx_time() functions
+ */
+ costs->disk_read_cost=0; // All data in memory
+ costs->disk_read_ratio= 0.0; // All data in memory
+ costs->key_next_find_cost= 0;
+ costs->key_copy_cost= 0; // Set in keyread_time()
+  costs->row_copy_cost= 2.334e-06;          // This is small as it's just a memcpy
+ costs->row_lookup_cost= 0; // Direct pointer
+ costs->row_next_find_cost= 0;
+ costs->key_lookup_cost= 0;
+ costs->key_next_find_cost= 0;
+ costs->index_block_copy_cost= 0;
+}
+
int heap_init(void *p)
{
handlerton *heap_hton;
@@ -53,6 +75,7 @@ int heap_init(void *p)
heap_hton->create= heap_create_handler;
heap_hton->panic= heap_panic;
heap_hton->drop_table= heap_drop_table;
+ heap_hton->update_optimizer_costs= heap_update_optimizer_costs;
heap_hton->flags= HTON_CAN_RECREATE;
return 0;
@@ -73,7 +96,8 @@ static handler *heap_create_handler(handlerton *hton,
ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), file(0), records_changed(0), key_stat_version(0),
internal_table(0)
-{}
+{
+}
/*
Hash index statistics is updated (copied from HP_KEYDEF::hash_buckets to
@@ -228,6 +252,41 @@ void ha_heap::update_key_stats()
}
+IO_AND_CPU_COST ha_heap::keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+{
+ KEY *key=table->key_info+index;
+ if (key->algorithm == HA_KEY_ALG_BTREE)
+ {
+ double lookup_cost;
+ lookup_cost= ranges * costs->key_cmp_cost * log2(stats.records+1);
+ return {0, ranges * lookup_cost + (rows-ranges) * BTREE_KEY_NEXT_FIND_COST };
+ }
+ else
+ {
+ return {0, (ranges * HEAP_LOOKUP_COST +
+ (rows-ranges) * BTREE_KEY_NEXT_FIND_COST) };
+ }
+}
+
+
+IO_AND_CPU_COST ha_heap::scan_time()
+{
+ return {0, (double) (stats.records+stats.deleted) * HEAP_ROW_NEXT_FIND_COST };
+}
+
+
+IO_AND_CPU_COST ha_heap::rnd_pos_time(ha_rows rows)
+{
+ /*
+ The row pointer is a direct pointer to the block. Thus almost instant
+ in practice.
+ Note that ha_rnd_pos_time() will add ROW_COPY_COST to this result
+ */
+ return { 0, 0 };
+}
+
+
int ha_heap::write_row(const uchar * buf)
{
int res;
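
Several engines in this patch (archive, federated, federatedx, heap) follow the same registration pattern: the engine init function installs an update_optimizer_costs callback on its handlerton. A stand-alone sketch of the pattern with mock types standing in for the server's handlerton and OPTIMIZER_COSTS:

#include <cstdio>

struct optimizer_costs_mock { double disk_read_ratio; double row_copy_cost; };
struct handlerton_mock
{
  void (*update_optimizer_costs)(optimizer_costs_mock *costs);
};

static void my_engine_update_optimizer_costs(optimizer_costs_mock *costs)
{
  costs->disk_read_ratio= 0.0;       /* all data in memory, as for heap */
  costs->row_copy_cost= 2.334e-06;
}

static int my_engine_init(handlerton_mock *hton)
{
  hton->update_optimizer_costs= my_engine_update_optimizer_costs;
  return 0;
}

int main()
{
  handlerton_mock hton;
  my_engine_init(&hton);
  optimizer_costs_mock costs;
  hton.update_optimizer_costs(&costs);
  printf("row_copy_cost=%g\n", costs.row_copy_cost);
  return 0;
}
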
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index 18e0d1a92d5..eed91176136 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -37,15 +37,15 @@ class ha_heap final : public handler
public:
ha_heap(handlerton *hton, TABLE_SHARE *table);
~ha_heap() = default;
- handler *clone(const char *name, MEM_ROOT *mem_root);
- const char *index_type(uint inx)
+ handler *clone(const char *name, MEM_ROOT *mem_root) override;
+ const char *index_type(uint inx) override
{
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
"BTREE" : "HASH");
}
/* Rows also use a fixed-size format */
- enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
- ulonglong table_flags() const
+ enum row_type get_row_type() const override { return ROW_TYPE_FIXED; }
+ ulonglong table_flags() const override
{
return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
@@ -53,73 +53,73 @@ public:
HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_HASH_KEYS);
}
- ulong index_flags(uint inx, uint part, bool all_parts) const
+ ulong index_flags(uint inx, uint part, bool all_parts) const override
{
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR);
}
- const key_map *keys_to_use_for_scanning() { return &btree_keys; }
- uint max_supported_keys() const { return MAX_KEY; }
- uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
- double scan_time()
- { return (double) (stats.records+stats.deleted) / 20.0+10; }
- double read_time(uint index, uint ranges, ha_rows rows)
- { return (double) (rows +1)/ 20.0; }
- double keyread_time(uint index, uint ranges, ha_rows rows)
- { return (double) (rows + ranges) / 20.0 ; }
- double avg_io_cost()
- { return 0.05; } /* 1/20 */
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- void set_keys_for_scanning(void);
- int write_row(const uchar * buf);
- int update_row(const uchar * old_data, const uchar * new_data);
- int delete_row(const uchar * buf);
- virtual void get_auto_increment(ulonglong offset, ulonglong increment,
- ulonglong nb_desired_values,
- ulonglong *first_value,
- ulonglong *nb_reserved_values);
+ const key_map *keys_to_use_for_scanning() override { return &btree_keys; }
+ uint max_supported_keys() const override { return MAX_KEY; }
+ uint max_supported_key_part_length() const override { return MAX_KEY_LENGTH; }
+ IO_AND_CPU_COST scan_time() override;
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks) override;
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) override;
+ /* 0 for avg_io_cost ensures that there are no read-block calculations */
+
+ int open(const char *name, int mode, uint test_if_locked) override;
+ int close(void) override;
+ int write_row(const uchar * buf) override;
+ int update_row(const uchar * old_data, const uchar * new_data) override;
+ int delete_row(const uchar * buf) override;
+ void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values) override;
int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map);
+ enum ha_rkey_function find_flag) override;
+ int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map)
+ override;
int index_read_idx_map(uchar * buf, uint index, const uchar * key,
key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_next(uchar * buf);
- int index_prev(uchar * buf);
- int index_first(uchar * buf);
- int index_last(uchar * buf);
- int rnd_init(bool scan);
- int rnd_next(uchar *buf);
- int rnd_pos(uchar * buf, uchar *pos);
- void position(const uchar *record);
- int can_continue_handler_scan();
- int info(uint);
- int extra(enum ha_extra_function operation);
- int reset();
- int external_lock(THD *thd, int lock_type);
- int delete_all_rows(void);
- int reset_auto_increment(ulonglong value);
- int disable_indexes(uint mode);
- int enable_indexes(uint mode);
- int indexes_are_disabled(void);
+ enum ha_rkey_function find_flag) override;
+ int index_next(uchar * buf) override;
+ int index_prev(uchar * buf) override;
+ int index_first(uchar * buf) override;
+ int index_last(uchar * buf) override;
+ int rnd_init(bool scan) override;
+ int rnd_next(uchar *buf) override;
+ int rnd_pos(uchar * buf, uchar *pos) override;
+ void position(const uchar *record) override;
+ int can_continue_handler_scan() override;
+ int info(uint) override;
+ int extra(enum ha_extra_function operation) override;
+ int reset() override;
+ int external_lock(THD *thd, int lock_type) override;
+ int delete_all_rows(void) override;
+ int reset_auto_increment(ulonglong value) override;
+ int disable_indexes(uint mode) override;
+ int enable_indexes(uint mode) override;
+ int indexes_are_disabled(void) override;
ha_rows records_in_range(uint inx, const key_range *start_key,
- const key_range *end_key, page_range *pages);
- int delete_table(const char *from);
- void drop_table(const char *name);
- int rename_table(const char * from, const char * to);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
- void update_create_info(HA_CREATE_INFO *create_info);
+ const key_range *end_key, page_range *pages) override;
+ int delete_table(const char *from) override;
+ void drop_table(const char *name) override;
+ int rename_table(const char * from, const char * to) override;
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) override;
+ void update_create_info(HA_CREATE_INFO *create_info) override;
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- int cmp_ref(const uchar *ref1, const uchar *ref2)
+ enum thr_lock_type lock_type) override;
+ int cmp_ref(const uchar *ref1, const uchar *ref2) override
{
return memcmp(ref1, ref2, sizeof(HEAP_PTR));
}
- bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
- int find_unique_row(uchar *record, uint unique_idx);
+ bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes)
+ override;
+ int find_unique_row(uchar *record, uint unique_idx) override;
private:
void update_key_stats();
+ void set_keys_for_scanning(void);
};
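
ha_heap.h also gains override on every virtual it redefines. This is what protects engines during API changes like the read_time() to keyread_time() switch: a mismatch with the base signature becomes a compile error instead of a silently unused virtual. Minimal self-contained illustration:

struct handler_base
{
  virtual ~handler_base() = default;
  virtual double scan_time() { return 0; }
};

struct my_handler : handler_base
{
  double scan_time() override { return 1; }   /* OK: matches the base      */
  /* double scan_time(int) override;             would not compile: no such
                                                 virtual exists in the base */
};

int main() { my_handler h; return (int) h.scan_time() - 1; }
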
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index a61b762f58b..3b1a285104c 100644
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -1,6 +1,6 @@
# Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
-# Copyright (c) 2014, 2022, MariaDB Corporation.
+# Copyright (c) 2014, 2023, MariaDB Corporation.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -264,7 +264,6 @@ SET(INNOBASE_SOURCES
include/handler0alter.h
include/hash0hash.h
include/ibuf0ibuf.h
- include/ibuf0ibuf.inl
include/lock0iter.h
include/lock0lock.h
include/lock0lock.inl
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index f28d3929569..9b69fde0408 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -37,7 +37,6 @@ Created 6/2/1994 Heikki Tuuri
#include "btr0defragment.h"
#include "rem0cmp.h"
#include "lock0lock.h"
-#include "ibuf0ibuf.h"
#include "trx0trx.h"
#include "srv0mon.h"
#include "gis0geo.h"
@@ -181,9 +180,8 @@ we allocate pages for the non-leaf levels of the tree.
@param block B-tree root page
@param space tablespace
@return whether the segment header is valid */
-static bool btr_root_fseg_validate(ulint offset,
- const buf_block_t &block,
- const fil_space_t &space)
+bool btr_root_fseg_validate(ulint offset, const buf_block_t &block,
+ const fil_space_t &space)
{
ut_ad(block.page.id().space() == space.id);
const uint16_t hdr= mach_read_from_2(offset + FSEG_HDR_OFFSET +
@@ -213,12 +211,11 @@ ATTRIBUTE_COLD void btr_decryption_failed(const dict_index_t &index)
@param[in] index index tree
@param[in] page page number
@param[in] mode latch mode
-@param[in] merge whether change buffer merge should be attempted
@param[in,out] mtr mini-transaction
@param[out] err error code
@return block */
buf_block_t *btr_block_get(const dict_index_t &index,
- uint32_t page, ulint mode, bool merge,
+ uint32_t page, ulint mode,
mtr_t *mtr, dberr_t *err)
{
dberr_t local_err;
@@ -227,7 +224,7 @@ buf_block_t *btr_block_get(const dict_index_t &index,
buf_block_t *block=
buf_page_get_gen(page_id_t{index.table->space->id, page},
index.table->space->zip_size(), mode, nullptr, BUF_GET,
- mtr, err, merge && !index.is_clust());
+ mtr, err);
ut_ad(!block == (*err != DB_SUCCESS));
if (UNIV_LIKELY(block != nullptr))
@@ -276,7 +273,7 @@ btr_root_block_get(
block=
buf_page_get_gen(page_id_t{index->table->space->id, index->page},
index->table->space->zip_size(), mode, guess, BUF_GET,
- mtr, err, false);
+ mtr, err);
ut_ad(!block == (*err != DB_SUCCESS));
if (UNIV_LIKELY(block != nullptr))
@@ -290,7 +287,6 @@ btr_root_block_get(
*err= DB_PAGE_CORRUPTED;
block= nullptr;
}
- else if (index->is_ibuf());
else if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
*block, *index->table->space) ||
!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
@@ -526,47 +522,16 @@ btr_block_reget(mtr_t *mtr, const dict_index_t &index,
}
ut_ad(mtr->memo_contains_flagged(&index.lock, MTR_MEMO_X_LOCK));
- return btr_block_get(index, id.page_no(), rw_latch, true, mtr, err);
-}
-
-/**************************************************************//**
-Allocates a new file page to be used in an ibuf tree. Takes the page from
-the free list of the tree, which must contain pages!
-@return new allocated block, x-latched */
-static
-buf_block_t*
-btr_page_alloc_for_ibuf(
-/*====================*/
- dict_index_t* index, /*!< in: index tree */
- mtr_t* mtr, /*!< in: mtr */
- dberr_t* err) /*!< out: error code */
-{
- buf_block_t *root= btr_get_latched_root(*index, mtr);
- if (UNIV_UNLIKELY(!root))
- return root;
- buf_block_t *new_block=
- buf_page_get_gen(page_id_t(IBUF_SPACE_ID,
- mach_read_from_4(PAGE_HEADER +
- PAGE_BTR_IBUF_FREE_LIST +
- FLST_FIRST + FIL_ADDR_PAGE +
- root->page.frame)),
- 0, RW_X_LATCH, nullptr, BUF_GET, mtr, err);
- if (new_block)
- *err= flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, new_block,
- PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
- ut_d(if (*err == DB_SUCCESS)
- flst_validate(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr));
- return new_block;
+ return btr_block_get(index, id.page_no(), rw_latch, mtr, err);
}
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
@retval NULL if no page could be allocated */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
+MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
-btr_page_alloc_low(
-/*===============*/
+btr_page_alloc(
dict_index_t* index, /*!< in: index */
uint32_t hint_page_no, /*!< in: hint of a good page */
byte file_direction, /*!< in: direction where a possible
@@ -580,6 +545,8 @@ btr_page_alloc_low(
page should be initialized. */
dberr_t* err) /*!< out: error code */
{
+ ut_ad(level < BTR_MAX_NODE_LEVEL);
+
const auto savepoint= mtr->get_savepoint();
buf_block_t *root= btr_root_block_get(index, RW_NO_LATCH, mtr, err);
if (UNIV_UNLIKELY(!root))
@@ -607,54 +574,6 @@ btr_page_alloc_low(
true, mtr, init_mtr, err);
}
-/**************************************************************//**
-Allocates a new file page to be used in an index tree. NOTE: we assume
-that the caller has made the reservation for free extents!
-@retval NULL if no page could be allocated */
-buf_block_t*
-btr_page_alloc(
-/*===========*/
- dict_index_t* index, /*!< in: index */
- uint32_t hint_page_no, /*!< in: hint of a good page */
- byte file_direction, /*!< in: direction where a possible
- page split is made */
- ulint level, /*!< in: level where the page is placed
- in the tree */
- mtr_t* mtr, /*!< in/out: mini-transaction
- for the allocation */
- mtr_t* init_mtr, /*!< in/out: mini-transaction
- for x-latching and initializing
- the page */
- dberr_t* err) /*!< out: error code */
-{
- ut_ad(level < BTR_MAX_NODE_LEVEL);
- return index->is_ibuf()
- ? btr_page_alloc_for_ibuf(index, mtr, err)
- : btr_page_alloc_low(index, hint_page_no, file_direction, level,
- mtr, init_mtr, err);
-}
-
-/**************************************************************//**
-Frees a page used in an ibuf tree. Puts the page to the free list of the
-ibuf tree. */
-static
-dberr_t
-btr_page_free_for_ibuf(
-/*===================*/
- dict_index_t* index, /*!< in: index tree */
- buf_block_t* block, /*!< in: block to be freed, x-latched */
- mtr_t* mtr) /*!< in: mtr */
-{
- ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
- buf_block_t *root= btr_get_latched_root(*index, mtr);
- dberr_t err=
- flst_add_first(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
- ut_d(if (err == DB_SUCCESS)
- flst_validate(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr));
- return err;
-}
-
/** Free an index page.
@param[in,out] index index tree
@param[in,out] block block to be freed
@@ -687,9 +606,6 @@ dberr_t btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
The page will be freed, so previous changes to it by this
mini-transaction should not matter. */
- if (index->is_ibuf())
- return btr_page_free_for_ibuf(index, block, mtr);
-
fil_space_t *space= index->table->space;
dberr_t err;
@@ -772,8 +688,7 @@ btr_node_ptr_get_child(
return btr_block_get(
*index, btr_node_ptr_get_child_page_no(node_ptr, offsets),
- RW_SX_LATCH, btr_page_get_level(page_align(node_ptr)) == 1,
- mtr, err);
+ RW_SX_LATCH, mtr, err);
}
MY_ATTRIBUTE((nonnull(2,3,4), warn_unused_result))
@@ -1036,77 +951,32 @@ btr_create(
mtr_t* mtr,
dberr_t* err)
{
- buf_block_t* block;
-
ut_ad(mtr->is_named_space(space));
ut_ad(index_id != BTR_FREED_INDEX_ID);
ut_ad(index || space == fil_system.sys_space);
- /* Create the two new segments (one, in the case of an ibuf tree) for
- the index tree; the segment headers are put on the allocated root page
- (for an ibuf tree, not in the root, but on a separate ibuf header
- page) */
-
- if (UNIV_UNLIKELY(type & DICT_IBUF)) {
- /* Allocate first the ibuf header page */
- buf_block_t* ibuf_hdr_block = fseg_create(
- space, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr, err);
-
- if (ibuf_hdr_block == NULL) {
- return(FIL_NULL);
- }
-
- ut_ad(ibuf_hdr_block->page.id().page_no()
- == IBUF_HEADER_PAGE_NO);
- /* Allocate then the next page to the segment: it will be the
- tree root page */
-
- block = fseg_alloc_free_page_general(
- buf_block_get_frame(ibuf_hdr_block)
- + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
- IBUF_TREE_ROOT_PAGE_NO,
- FSP_UP, false, mtr, mtr, err);
-
- if (block == NULL) {
- return(FIL_NULL);
- }
-
- ut_ad(block->page.id() == page_id_t(0,IBUF_TREE_ROOT_PAGE_NO));
+ /* Create the two new segments for the index tree;
+ the segment headers are put on the allocated root page */
- flst_init(block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr);
- } else {
- block = fseg_create(space, PAGE_HEADER + PAGE_BTR_SEG_TOP,
- mtr, err);
+ buf_block_t *block = fseg_create(space, PAGE_HEADER + PAGE_BTR_SEG_TOP,
+ mtr, err);
- if (block == NULL) {
- return(FIL_NULL);
- }
+ if (!block) {
+ return FIL_NULL;
+ }
- if (!fseg_create(space, PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr,
- err, false, block)) {
- /* Not enough space for new segment, free root
- segment before return. */
- btr_free_root(block, *space, mtr);
- return(FIL_NULL);
- }
+ if (!fseg_create(space, PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr,
+ err, false, block)) {
+ /* Not enough space for new segment, free root
+ segment before return. */
+ btr_free_root(block, *space, mtr);
+ return FIL_NULL;
}
ut_ad(!page_has_siblings(block->page.frame));
btr_root_page_init(block, index_id, index, mtr);
- /* We reset the free bits for the page in a separate
- mini-transaction to allow creation of several trees in the
- same mtr, otherwise the latch on a bitmap page would prevent
- it because of the latching order.
-
- Note: Insert Buffering is disabled for temporary tables given that
- most temporary tables are smaller in size and short-lived. */
- if (!(type & DICT_CLUSTERED)
- && (!index || !index->table->is_temporary())) {
- ibuf_reset_free_bits(block);
- }
-
/* In the following assertion we test that two records of maximum
allowed size fit on the root page: this fact is needed to ensure
correctness of split algorithms */
@@ -1258,7 +1128,7 @@ void btr_drop_temporary_table(const dict_table_t &table)
{
if (buf_block_t *block= buf_page_get_low({SRV_TMP_SPACE_ID, index->page}, 0,
RW_X_LATCH, nullptr, BUF_GET, &mtr,
- nullptr, false))
+ nullptr))
{
btr_free_but_not_root(block, MTR_LOG_NO_REDO);
mtr.set_log_mode(MTR_LOG_NO_REDO);
@@ -1429,18 +1299,18 @@ static dberr_t btr_page_reorganize_low(page_cur_t *cursor, mtr_t *mtr)
if (page_get_max_trx_id(block->page.frame))
/* PAGE_MAX_TRX_ID must be zero on non-leaf pages other than
clustered index root pages. */
- ut_ad(dict_index_is_sec_or_ibuf(cursor->index)
+ ut_ad(!cursor->index->is_primary()
? page_is_leaf(block->page.frame)
: block->page.id().page_no() == cursor->index->page);
else
/* PAGE_MAX_TRX_ID is unused in clustered index pages (other than
the root where it is repurposed as PAGE_ROOT_AUTO_INC), non-leaf
pages, and in temporary tables. It was always zero-initialized in
- page_create(). PAGE_MAX_TRX_ID must be nonzero on
- dict_index_is_sec_or_ibuf() leaf pages. */
+ page_create(). PAGE_MAX_TRX_ID must be nonzero on secondary index
+ leaf pages. */
ut_ad(cursor->index->table->is_temporary() ||
!page_is_leaf(block->page.frame) ||
- !dict_index_is_sec_or_ibuf(cursor->index));
+ cursor->index->is_primary());
#endif
const uint16_t data_size1= page_get_data_size(old->page.frame);
@@ -1640,15 +1510,7 @@ static dberr_t btr_page_reorganize_low(page_cur_t *cursor, mtr_t *mtr)
return DB_SUCCESS;
}
-/*************************************************************//**
-Reorganizes an index page.
-
-IMPORTANT: On success, the caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index. This has to
-be done either within the same mini-transaction, or by invoking
-ibuf_reset_free_bits() before mtr_commit(). On uncompressed pages,
-IBUF_BITMAP_FREE is unaffected by reorganization.
-
+/** Reorganize an index page.
@return error code
@retval DB_FAIL if reorganizing a ROW_FORMAT=COMPRESSED page failed */
dberr_t
@@ -1667,15 +1529,7 @@ btr_page_reorganize_block(
return btr_page_reorganize_low(&cur, mtr);
}
-/*************************************************************//**
-Reorganizes an index page.
-
-IMPORTANT: On success, the caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index. This has to
-be done either within the same mini-transaction, or by invoking
-ibuf_reset_free_bits() before mtr_commit(). On uncompressed pages,
-IBUF_BITMAP_FREE is unaffected by reorganization.
-
+/** Reorganize an index page.
@param cursor page cursor
@param mtr mini-transaction
@return error code
@@ -1901,6 +1755,7 @@ btr_root_raise_and_insert(
ut_ad(!page_is_empty(root->page.frame));
index = btr_cur_get_index(cursor);
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
+ ut_ad(!index->is_spatial());
#ifdef UNIV_ZIP_DEBUG
ut_a(!root_page_zip
|| page_zip_validate(root_page_zip, root->page.frame, index));
@@ -1916,12 +1771,11 @@ btr_root_raise_and_insert(
return nullptr;
}
- if (index->is_ibuf()) {
- } else if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
- *root, *index->table->space)
- || !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
- *root, *index->table->space)) {
- return nullptr;
+ if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
+ *root, *index->table->space)
+ || !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
+ *root, *index->table->space)) {
+ return nullptr;
}
/* Allocate a new page to the tree. Root splitting is done by first
@@ -1987,18 +1841,12 @@ btr_root_raise_and_insert(
page_get_infimum_rec(root->page.frame));
}
- /* Move any existing predicate locks */
- if (dict_index_is_spatial(index)) {
- lock_prdt_rec_move(new_block, root_id);
- } else {
- btr_search_move_or_delete_hash_entries(
- new_block, root);
- }
+ btr_search_move_or_delete_hash_entries(new_block, root);
}
constexpr uint16_t max_trx_id = PAGE_HEADER + PAGE_MAX_TRX_ID;
- if (dict_index_is_sec_or_ibuf(index)) {
- /* In secondary indexes and the change buffer,
+ if (!index->is_primary()) {
+ /* In secondary indexes,
PAGE_MAX_TRX_ID can be reset on the root page, because
the field only matters on leaf pages, and the root no
longer is a leaf page. (Older versions of InnoDB did
@@ -2048,16 +1896,8 @@ btr_root_raise_and_insert(
/* Build the node pointer (= node key and page address) for the
child */
- if (dict_index_is_spatial(index)) {
- rtr_mbr_t new_mbr;
-
- rtr_page_cal_mbr(index, new_block, &new_mbr, *heap);
- node_ptr = rtr_index_build_node_ptr(
- index, &new_mbr, rec, new_page_no, *heap);
- } else {
- node_ptr = dict_index_build_node_ptr(
- index, rec, new_page_no, *heap, level);
- }
+ node_ptr = dict_index_build_node_ptr(index, rec, new_page_no, *heap,
+ level);
/* The node pointer must be marked as the predefined minimum record,
as there is no lower alphabetical limit to records in the leftmost
node of a level: */
@@ -2090,13 +1930,6 @@ btr_root_raise_and_insert(
to new_block at this point. Thus, the data should fit. */
ut_a(node_ptr_rec);
- /* We play safe and reset the free bits for the new page */
-
- if (!dict_index_is_clust(index)
- && !index->table->is_temporary()) {
- ibuf_reset_free_bits(new_block);
- }
-
page_cursor->block = new_block;
page_cursor->index = index;
@@ -2462,10 +2295,9 @@ btr_insert_on_non_leaf_level(
rtr_init_rtr_info(&rtr_info, false, &cursor, index, false);
rtr_info_update_btr(&cursor, &rtr_info);
- err = rtr_search_to_nth_level(level, tuple,
- PAGE_CUR_RTREE_INSERT,
- BTR_CONT_MODIFY_TREE,
- &cursor, mtr);
+ err = rtr_search_to_nth_level(&cursor, nullptr, tuple,
+ BTR_CONT_MODIFY_TREE, mtr,
+ PAGE_CUR_RTREE_INSERT, level);
} else {
err = btr_cur_search_to_nth_level(level, tuple, RW_X_LATCH,
&cursor, mtr);
@@ -2586,7 +2418,7 @@ btr_attach_half_pages(
ut_ad(mtr->memo_contains(index->lock,
MTR_MEMO_X_LOCK));
prev_block = btr_block_get(*index, prev_page_no,
- RW_X_LATCH, !level, mtr);
+ RW_X_LATCH, mtr);
}
#endif
}
@@ -2598,7 +2430,7 @@ btr_attach_half_pages(
ut_ad(mtr->memo_contains(index->lock,
MTR_MEMO_X_LOCK));
next_block = btr_block_get(*index, next_page_no,
- RW_X_LATCH, !level, mtr);
+ RW_X_LATCH, mtr);
}
#endif
}
@@ -2746,10 +2578,9 @@ btr_insert_into_right_sibling(
page_t* next_page;
btr_cur_t next_father_cursor;
rec_t* rec = nullptr;
- ulint max_size;
next_block = btr_block_get(*cursor->index(), next_page_no, RW_X_LATCH,
- page_is_leaf(page), mtr);
+ mtr);
if (UNIV_UNLIKELY(!next_block)) {
return nullptr;
}
@@ -2772,8 +2603,6 @@ btr_insert_into_right_sibling(
return nullptr;
}
- max_size = page_get_max_insert_size_after_reorganize(next_page, 1);
-
/* Extends gap lock for the next page */
if (is_leaf && cursor->index()->has_locking()) {
lock_update_node_pointer(block, next_block);
@@ -2783,15 +2612,6 @@ btr_insert_into_right_sibling(
n_ext, mtr);
if (!rec) {
- if (is_leaf
- && next_block->page.zip.ssize
- && !dict_index_is_clust(cursor->index())
- && !cursor->index()->table->is_temporary()) {
- /* Reset the IBUF_BITMAP_FREE bits, because
- page_cur_tuple_insert() will have attempted page
- reorganize before failing. */
- ibuf_reset_free_bits(next_block);
- }
return nullptr;
}
@@ -2829,34 +2649,12 @@ btr_insert_into_right_sibling(
}
ut_ad(rec_offs_validate(rec, cursor->index(), *offsets));
-
- if (is_leaf
- && !dict_index_is_clust(cursor->index())
- && !cursor->index()->table->is_temporary()) {
- /* Update the free bits of the B-tree page in the
- insert buffer bitmap. */
-
- if (next_block->page.zip.ssize) {
- ibuf_update_free_bits_zip(next_block, mtr);
- } else {
- ibuf_update_free_bits_if_full(
- next_block, max_size,
- rec_offs_size(*offsets) + PAGE_DIR_SLOT_SIZE);
- }
- }
-
return(rec);
}
/*************************************************************//**
Moves record list end to another page. Moved records include
split_rec.
-
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return error code */
static
dberr_t
@@ -2912,12 +2710,6 @@ page_move_rec_list_end(
/*************************************************************//**
Moves record list start to another page. Moved records do not include
split_rec.
-
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return error code */
static
dberr_t
@@ -2975,15 +2767,10 @@ btr_page_split_and_insert(
ut_ad(*err == DB_SUCCESS);
ut_ad(dtuple_check_typed(tuple));
+ ut_ad(!cursor->index()->is_spatial());
buf_pool.pages_split++;
- if (cursor->index()->is_spatial()) {
- /* Split rtree page and update parent */
- return rtr_page_split_and_insert(flags, cursor, offsets, heap,
- tuple, n_ext, mtr, err);
- }
-
if (!*heap) {
*heap = mem_heap_create(1024);
}
@@ -3348,13 +3135,6 @@ insert_empty:
/* The insert did not fit on the page: loop back to the
start of the function for a new split */
insert_failed:
- /* We play safe and reset the free bits for new_page */
- if (!dict_index_is_clust(page_cursor->index)
- && !page_cursor->index->table->is_temporary()) {
- ibuf_reset_free_bits(new_block);
- ibuf_reset_free_bits(block);
- }
-
n_iterations++;
ut_ad(n_iterations < 2
|| buf_block_get_page_zip(insert_block));
@@ -3364,17 +3144,6 @@ insert_failed:
}
func_exit:
- /* Insert fit on the page: update the free bits for the
- left and right pages in the same mtr */
-
- if (!dict_index_is_clust(page_cursor->index)
- && !page_cursor->index->table->is_temporary()
- && page_is_leaf(page)) {
-
- ibuf_update_free_bits_for_two_pages_low(
- left_block, right_block, mtr);
- }
-
ut_ad(page_validate(buf_block_get_frame(left_block),
page_cursor->index));
ut_ad(page_validate(buf_block_get_frame(right_block),
@@ -3410,8 +3179,7 @@ dberr_t btr_level_list_remove(const buf_block_t& block,
if (!prev)
{
ut_ad(mtr->memo_contains(index.lock, MTR_MEMO_X_LOCK));
- prev= btr_block_get(index, id.page_no(), RW_X_LATCH,
- page_is_leaf(block.page.frame), mtr, &err);
+ prev= btr_block_get(index, id.page_no(), RW_X_LATCH, mtr, &err);
if (UNIV_UNLIKELY(!prev))
return err;
}
@@ -3426,8 +3194,7 @@ dberr_t btr_level_list_remove(const buf_block_t& block,
if (!next)
{
ut_ad(mtr->memo_contains(index.lock, MTR_MEMO_X_LOCK));
- next= btr_block_get(index, id.page_no(), RW_X_LATCH,
- page_is_leaf(block.page.frame), mtr, &err);
+ next= btr_block_get(index, id.page_no(), RW_X_LATCH, mtr, &err);
if (UNIV_UNLIKELY(!next))
return err;
}
@@ -3452,6 +3219,7 @@ btr_lift_page_up(
must not be empty: use
btr_discard_only_page_on_level if the last
record from the page should be removed */
+ que_thr_t* thr, /*!< in/out: query thread */
mtr_t* mtr, /*!< in/out: mini-transaction */
dberr_t* err) /*!< out: error code */
{
@@ -3486,7 +3254,8 @@ btr_lift_page_up(
if (index->is_spatial()) {
offsets = rtr_page_get_father_block(
- nullptr, heap, mtr, nullptr, &cursor);
+ nullptr, heap, nullptr, &cursor,
+ thr, mtr);
} else {
offsets = btr_page_get_father_block(offsets, heap,
mtr, &cursor);
@@ -3507,7 +3276,8 @@ btr_lift_page_up(
if (index->is_spatial()) {
offsets = rtr_page_get_father_block(
- nullptr, heap, mtr, nullptr, &cursor);
+ nullptr, heap, nullptr, &cursor, thr,
+ mtr);
} else {
offsets = btr_page_get_father_block(offsets,
heap,
@@ -3638,13 +3408,8 @@ copied:
/* Free the file page */
btr_page_free(index, block, mtr);
- /* We play it safe and reset the free bits for the father */
- if (!dict_index_is_clust(index)
- && !index->table->is_temporary()) {
- ibuf_reset_free_bits(father_block);
- }
ut_ad(page_validate(father_block->page.frame, index));
- ut_ad(btr_check_node_ptr(index, father_block, mtr));
+ ut_ad(btr_check_node_ptr(index, father_block, thr, mtr));
return(lift_father_up ? block_orig : father_block);
}
@@ -3711,8 +3476,10 @@ btr_compress(
father_cursor.page_cur.block = block;
if (index->is_spatial()) {
+ ut_ad(cursor->rtr_info);
offsets = rtr_page_get_father_block(
- NULL, heap, mtr, cursor, &father_cursor);
+ nullptr, heap, cursor, &father_cursor,
+ cursor->rtr_info->thr, mtr);
ut_ad(cursor->page_cur.block->page.id() == block->page.id());
rec_t* my_rec = father_cursor.page_cur.rec;
@@ -3722,10 +3489,10 @@ btr_compress(
ib::info() << "father positioned on page "
<< page_no << "instead of "
<< block->page.id().page_no();
- offsets = btr_page_get_father_block(
- NULL, heap, mtr, &father_cursor);
+ goto get_offsets;
}
} else {
+get_offsets:
offsets = btr_page_get_father_block(
NULL, heap, mtr, &father_cursor);
}
@@ -3735,14 +3502,7 @@ btr_compress(
if (UNIV_UNLIKELY(!nth_rec || nth_rec == ULINT_UNDEFINED)) {
corrupted:
err = DB_CORRUPTION;
- err_exit:
- /* We play it safe and reset the free bits. */
- if (merge_block && merge_block->zip_size()
- && page_is_leaf(merge_block->page.frame)
- && !index->is_clust()) {
- ibuf_reset_free_bits(merge_block);
- }
- goto func_exit;
+ goto err_exit;
}
}
@@ -3750,7 +3510,10 @@ btr_compress(
/* The page is the only one on the level, lift the records
to the father */
- merge_block = btr_lift_page_up(index, block, mtr, &err);
+ merge_block = btr_lift_page_up(index, block,
+ cursor->rtr_info
+ ? cursor->rtr_info->thr
+ : nullptr, mtr, &err);
success:
if (adjust) {
ut_ad(nth_rec > 0);
@@ -3765,7 +3528,7 @@ success:
}
MONITOR_INC(MONITOR_INDEX_MERGE_SUCCESSFUL);
-func_exit:
+err_exit:
mem_heap_free(heap);
DBUG_RETURN(err);
}
@@ -4065,49 +3828,6 @@ cannot_merge:
}
}
- if (!dict_index_is_clust(index)
- && !index->table->is_temporary()
- && page_is_leaf(merge_page)) {
- /* Update the free bits of the B-tree page in the
- insert buffer bitmap. This has to be done in a
- separate mini-transaction that is committed before the
- main mini-transaction. We cannot update the insert
- buffer bitmap in this mini-transaction, because
- btr_compress() can be invoked recursively without
- committing the mini-transaction in between. Since
- insert buffer bitmap pages have a lower rank than
- B-tree pages, we must not access other pages in the
- same mini-transaction after accessing an insert buffer
- bitmap page. */
-
- /* The free bits in the insert buffer bitmap must
- never exceed the free space on a page. It is safe to
- decrement or reset the bits in the bitmap in a
- mini-transaction that is committed before the
- mini-transaction that affects the free space. */
-
- /* It is unsafe to increment the bits in a separately
- committed mini-transaction, because in crash recovery,
- the free bits could momentarily be set too high. */
-
- if (merge_block->zip_size()) {
- /* Because the free bits may be incremented
- and we cannot update the insert buffer bitmap
- in the same mini-transaction, the only safe
- thing we can do here is the pessimistic
- approach: reset the free bits. */
- ibuf_reset_free_bits(merge_block);
- } else {
- /* On uncompressed pages, the free bits will
- never increase here. Thus, it is safe to
- write the bits accurately in a separate
- mini-transaction. */
- ibuf_update_free_bits_if_full(merge_block,
- srv_page_size,
- ULINT_UNDEFINED);
- }
- }
-
ut_ad(page_validate(merge_page, index));
#ifdef UNIV_ZIP_DEBUG
ut_a(!merge_page_zip || page_zip_validate(merge_page_zip, merge_page,
@@ -4122,7 +3842,10 @@ cannot_merge:
err = btr_page_free(index, block, mtr);
if (err == DB_SUCCESS) {
ut_ad(leftmost_child
- || btr_check_node_ptr(index, merge_block, mtr));
+ || btr_check_node_ptr(index, merge_block,
+ cursor->rtr_info
+ ? cursor->rtr_info->thr
+ : nullptr, mtr));
goto success;
} else {
goto err_exit;
@@ -4139,11 +3862,13 @@ static
void
btr_discard_only_page_on_level(
/*===========================*/
- dict_index_t* index, /*!< in: index tree */
- buf_block_t* block, /*!< in: page which is the only on its level */
+ btr_cur_t* cur, /*!< in: cursor on a page which is the
+ only on its level */
mtr_t* mtr) /*!< in: mtr */
{
- ulint page_level = 0;
+ dict_index_t* index = cur->index();
+ buf_block_t* block = btr_cur_get_block(cur);
+ ulint page_level = 0;
ut_ad(!index->is_dummy);
@@ -4174,7 +3899,8 @@ btr_discard_only_page_on_level(
if (index->is_spatial()) {
/* Check any concurrent search having this page */
rtr_check_discard_page(index, NULL, block);
- if (!rtr_page_get_father(mtr, nullptr, &cursor)) {
+ if (!rtr_page_get_father(mtr, nullptr, &cursor,
+ cur->rtr_info->thr)) {
return;
}
} else {
@@ -4240,9 +3966,6 @@ btr_discard_only_page_on_level(
index->clear_instant_add();
}
} else if (!index->table->is_temporary()) {
- /* We play it safe and reset the free bits for the root */
- ibuf_reset_free_bits(block);
-
ut_a(max_trx_id);
page_set_max_trx_id(block,
buf_block_get_page_zip(block),
@@ -4279,7 +4002,8 @@ btr_discard_page(
MONITOR_INC(MONITOR_INDEX_DISCARD);
if (index->is_spatial()
- ? !rtr_page_get_father(mtr, cursor, &parent_cursor)
+ ? !rtr_page_get_father(mtr, cursor, &parent_cursor,
+ cursor->rtr_info->thr)
: !btr_page_get_father(mtr, &parent_cursor)) {
return DB_CORRUPTION;
}
@@ -4353,7 +4077,7 @@ btr_discard_page(
return DB_CORRUPTION;
}
} else {
- btr_discard_only_page_on_level(index, block, mtr);
+ btr_discard_only_page_on_level(cursor, mtr);
return DB_SUCCESS;
}
@@ -4408,14 +4132,20 @@ btr_discard_page(
If the merge_block's parent block is not same,
we cannot use btr_check_node_ptr() */
ut_ad(parent_is_different
- || btr_check_node_ptr(index, merge_block, mtr));
+ || btr_check_node_ptr(index, merge_block,
+ cursor->rtr_info
+ ? cursor->rtr_info->thr
+ : nullptr, mtr));
if (btr_cur_get_block(&parent_cursor)->page.id().page_no()
== index->page
&& !page_has_siblings(btr_cur_get_page(&parent_cursor))
&& page_get_n_recs(btr_cur_get_page(&parent_cursor))
== 1) {
- btr_lift_page_up(index, merge_block, mtr, &err);
+ btr_lift_page_up(index, merge_block,
+ cursor->rtr_info
+ ? cursor->rtr_info->thr
+ : nullptr, mtr, &err);
}
}
@@ -4434,13 +4164,6 @@ btr_print_size(
fseg_header_t* seg;
mtr_t mtr;
- if (dict_index_is_ibuf(index)) {
- fputs("Sorry, cannot print info of an ibuf tree:"
- " use ibuf functions\n", stderr);
-
- return;
- }
-
mtr_start(&mtr);
root = btr_root_get(index, &mtr);
@@ -4450,13 +4173,10 @@ btr_print_size(
fputs("INFO OF THE NON-LEAF PAGE SEGMENT\n", stderr);
fseg_print(seg, &mtr);
- if (!dict_index_is_ibuf(index)) {
-
- seg = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
+ seg = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
- fputs("INFO OF THE LEAF PAGE SEGMENT\n", stderr);
- fseg_print(seg, &mtr);
- }
+ fputs("INFO OF THE LEAF PAGE SEGMENT\n", stderr);
+ fseg_print(seg, &mtr);
mtr_commit(&mtr);
}
@@ -4567,6 +4287,7 @@ btr_check_node_ptr(
/*===============*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: index page */
+ que_thr_t* thr, /*!< in/out: query thread */
mtr_t* mtr) /*!< in: mtr */
{
mem_heap_t* heap;
@@ -4588,8 +4309,8 @@ btr_check_node_ptr(
heap = mem_heap_create(256);
if (dict_index_is_spatial(index)) {
- offsets = rtr_page_get_father_block(NULL, heap, mtr,
- NULL, &cursor);
+ offsets = rtr_page_get_father_block(NULL, heap,
+ NULL, &cursor, thr, mtr);
} else {
offsets = btr_page_get_father_block(NULL, heap, mtr, &cursor);
}
@@ -4664,14 +4385,6 @@ btr_index_rec_validate(
ut_ad(index->n_core_fields);
- if (index->is_ibuf()) {
- /* The insert buffer index tree can contain records from any
- other index: we cannot check the number of fields or
- their length */
-
- return(TRUE);
- }
-
#ifdef VIRTUAL_INDEX_DEBUG
if (dict_index_has_virtual(index)) {
fprintf(stderr, "index name is %s\n", index->name());
@@ -4999,8 +4712,7 @@ corrupted:
mtr.release_last_page();
block = btr_block_get(*index, left_page_no,
- RW_SX_LATCH, false,
- &mtr, &err);
+ RW_SX_LATCH, &mtr, &err);
if (!block) {
goto invalid_page;
}
@@ -5071,7 +4783,7 @@ func_exit:
const rec_t* right_rec;
right_block = btr_block_get(*index, right_page_no, RW_SX_LATCH,
- !level, &mtr, &err);
+ &mtr, &err);
if (!right_block) {
btr_validate_report1(index, level, block);
fputs("InnoDB: broken FIL_PAGE_NEXT link\n", stderr);
@@ -5324,7 +5036,7 @@ node_ptr_fails:
mtr.start();
block = btr_block_get(*index, right_page_no, RW_SX_LATCH,
- !level, &mtr, &err);
+ &mtr, &err);
goto loop;
}
@@ -5391,8 +5103,7 @@ error:
index = btr_cur_get_index(cursor);
page = btr_cur_get_page(cursor);
- mblock = btr_block_get(*index, page_no, RW_X_LATCH, page_is_leaf(page),
- mtr);
+ mblock = btr_block_get(*index, page_no, RW_X_LATCH, mtr);
if (!mblock) {
goto error;
}
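Note: the btr0btr.cc hunks above follow one mechanical pattern: btr_block_get() loses the trailing flag that told the buffer pool whether the requested page was a leaf that might need change-buffer merging, and the ibuf_reset_free_bits()/ibuf_update_free_bits_*() calls around page splits and merges disappear. A minimal standalone sketch of the call-site effect (toy types, not InnoDB code):

#include <cstdint>
#include <iostream>

struct mtr_t {};                       // stand-in mini-transaction
struct block_t { uint32_t page_no; };

// old shape: block_get(space, page_no, latch, possible_ibuf_merge, mtr)
// new shape: block_get(space, page_no, latch, mtr)
static block_t* block_get(uint32_t space, uint32_t page_no,
                          int latch, mtr_t* mtr)
{
  (void) space; (void) latch; (void) mtr;
  static block_t b;
  b.page_no = page_no;                 // pretend the page is latched now
  return &b;
}

int main()
{
  mtr_t mtr;
  // Callers such as btr_attach_half_pages() no longer have to compute
  // "!level" or "page_is_leaf(page)" just to feed the removed parameter.
  block_t* prev = block_get(0, 7, /*RW_X_LATCH*/ 2, &mtr);
  std::cout << "latched page " << prev->page_no << '\n';
  return 0;
}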
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index 013cd13102c..3c5b4b293f2 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -28,7 +28,6 @@ Created 03/11/2014 Shaohua Wang
#include "btr0btr.h"
#include "btr0cur.h"
#include "btr0pcur.h"
-#include "ibuf0ibuf.h"
#include "page0page.h"
#include "trx0trx.h"
@@ -107,7 +106,7 @@ oom:
}
} else {
new_block = btr_block_get(*m_index, m_page_no, RW_X_LATCH,
- false, &m_mtr);
+ &m_mtr);
if (!new_block) {
m_mtr.commit();
return(DB_CORRUPTION);
@@ -122,7 +121,7 @@ oom:
m_page_zip = buf_block_get_page_zip(new_block);
- if (!m_level && dict_index_is_sec_or_ibuf(m_index)) {
+ if (!m_level && !m_index->is_primary()) {
page_update_max_trx_id(new_block, m_page_zip, m_trx_id,
&m_mtr);
}
@@ -563,9 +562,6 @@ inline void PageBulk::finish()
void PageBulk::commit(bool success)
{
finish();
- if (success && !m_index->is_clust() && page_is_leaf(m_page))
- ibuf_set_bitmap_for_bulk_load(m_block, &m_mtr,
- innobase_fill_factor == 100);
m_mtr.commit();
}
@@ -1194,7 +1190,7 @@ BtrBulk::finish(dberr_t err)
ut_ad(last_page_no != FIL_NULL);
last_block = btr_block_get(*m_index, last_page_no, RW_X_LATCH,
- false, &mtr);
+ &mtr);
if (!last_block) {
err = DB_CORRUPTION;
err_exit:
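Note: in the PageBulk/BtrBulk hunk above, dict_index_is_sec_or_ibuf(m_index) becomes !m_index->is_primary(); once the change-buffer pseudo-index no longer exists, the two predicates coincide. A self-contained check of that equivalence (illustrative names only):

#include <cassert>
#include <initializer_list>

struct toy_index {
  bool primary;
  bool ibuf;                           // always false once ibuf is removed
  bool is_primary() const { return primary; }
};

static bool is_sec_or_ibuf(const toy_index& i) { return !i.primary || i.ibuf; }

int main()
{
  for (bool primary : {false, true}) {
    toy_index i{primary, /*ibuf=*/false};
    // With i.ibuf pinned to false, the old and new predicates agree.
    assert(is_sec_or_ibuf(i) == !i.is_primary());
  }
  return 0;
}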
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 71177e228ec..74db3fa3d8f 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -61,7 +61,6 @@ Created 10/16/1994 Heikki Tuuri
#include "que0que.h"
#include "row0row.h"
#include "srv0srv.h"
-#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "zlib.h"
#include "srv0start.h"
@@ -73,15 +72,6 @@ Created 10/16/1994 Heikki Tuuri
#endif /* WITH_WSREP */
#include "log.h"
-/** Buffered B-tree operation types, introduced as part of delete buffering. */
-enum btr_op_t {
- BTR_NO_OP = 0, /*!< Not buffered */
- BTR_INSERT_OP, /*!< Insert, do not ignore UNIQUE */
- BTR_INSERT_IGNORE_UNIQUE_OP, /*!< Insert, ignoring UNIQUE */
- BTR_DELETE_OP, /*!< Purge a delete-marked record */
- BTR_DELMARK_OP /*!< Mark a record for deletion */
-};
-
/** Modification types for the B-tree operation.
Note that the order must be DELETE, BOTH, INSERT !!
*/
@@ -197,10 +187,14 @@ when loading a table definition.
static dberr_t btr_cur_instant_init_low(dict_index_t* index, mtr_t* mtr)
{
ut_ad(index->is_primary());
- ut_ad(index->n_core_null_bytes == dict_index_t::NO_CORE_NULL_BYTES);
- ut_ad(index->table->supports_instant());
ut_ad(index->table->is_readable());
+ if (!index->table->supports_instant()) {
+ return DB_SUCCESS;
+ }
+
+ ut_ad(index->n_core_null_bytes == dict_index_t::NO_CORE_NULL_BYTES);
+
dberr_t err;
const fil_space_t* space = index->table->space;
if (!space) {
@@ -467,17 +461,25 @@ when loading a table definition.
@param[in,out] table table definition from the data dictionary
@return error code
@retval DB_SUCCESS if no error occurred */
-dberr_t
-btr_cur_instant_init(dict_table_t* table)
+dberr_t btr_cur_instant_init(dict_table_t *table)
{
- mtr_t mtr;
- dict_index_t* index = dict_table_get_first_index(table);
- mtr.start();
- dberr_t err = index
- ? btr_cur_instant_init_low(index, &mtr)
- : DB_CORRUPTION;
- mtr.commit();
- return(err);
+ mtr_t mtr;
+ dict_index_t *index= dict_table_get_first_index(table);
+ mtr.start();
+ dberr_t err = index ? btr_cur_instant_init_low(index, &mtr) : DB_CORRUPTION;
+ mtr.commit();
+ if (err == DB_SUCCESS && index->is_gen_clust())
+ {
+ btr_cur_t cur;
+ mtr.start();
+ err= cur.open_leaf(false, index, BTR_SEARCH_LEAF, &mtr);
+ if (err != DB_SUCCESS);
+ else if (const rec_t *rec= page_rec_get_prev(btr_cur_get_rec(&cur)))
+ if (page_rec_is_user_rec(rec))
+ table->row_id= mach_read_from_6(rec);
+ mtr.commit();
+ }
+ return(err);
}
/** Initialize the n_core_null_bytes on first access to a clustered
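Note: the rewritten btr_cur_instant_init() above additionally positions a cursor at the end of a generated clustered index and reads the last DB_ROW_ID with mach_read_from_6(). That helper decodes a 6-byte big-endian integer; a standalone sketch of the decoding (not the InnoDB implementation):

#include <cstdint>
#include <cstdio>

static uint64_t read_from_6(const unsigned char* b)
{
  uint64_t v = 0;
  for (int i = 0; i < 6; i++)
    v = (v << 8) | b[i];               // big-endian, 48 significant bits
  return v;
}

int main()
{
  const unsigned char row_id[6] = {0, 0, 0, 0, 0x01, 0x02};
  std::printf("row_id=%llu\n", (unsigned long long) read_from_6(row_id)); // 258
  return 0;
}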
@@ -783,20 +785,6 @@ static bool btr_cur_need_opposite_intention(const page_t *page,
@return maximum size of a node pointer record in bytes */
static ulint btr_node_ptr_max_size(const dict_index_t* index)
{
- if (dict_index_is_ibuf(index)) {
- /* cannot estimate accurately */
- /* This is universal index for change buffer.
- The max size of the entry is about max key length * 2.
- (index key + primary key to be inserted to the index)
- (The max key length is UNIV_PAGE_SIZE / 16 * 3 at
- ha_innobase::max_supported_key_length(),
- considering MAX_KEY_LENGTH = 3072 at MySQL imposes
- the 3500 historical InnoDB value for 16K page size case.)
- For the universal index, node_ptr contains most of the entry.
- And 512 is enough to contain ibuf columns and meta-data */
- return srv_page_size / 8 * 3 + 512;
- }
-
/* Each record has page_no, length of page_no and header. */
ulint comp = dict_table_is_comp(index->table);
ulint rec_max_size = comp
@@ -935,11 +923,9 @@ static inline page_cur_mode_t btr_cur_nonleaf_mode(page_cur_mode_t mode)
dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
btr_latch_mode latch_mode, mtr_t *mtr)
{
- ut_ad(index()->is_btree() || index()->is_ibuf());
- ut_ad(!index()->is_ibuf() || ibuf_inside(mtr));
+ ut_ad(index()->is_btree());
buf_block_t *guess;
- btr_op_t btr_op;
btr_intention_t lock_intention;
bool detected_same_key_root= false;
@@ -967,34 +953,6 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
MTR_MEMO_S_LOCK | MTR_MEMO_SX_LOCK |
MTR_MEMO_X_LOCK));
- /* These flags are mutually exclusive, they are lumped together
- with the latch mode for historical reasons. It's possible for
- none of the flags to be set. */
- switch (UNIV_EXPECT(latch_mode & BTR_DELETE, 0)) {
- default:
- btr_op= BTR_NO_OP;
- break;
- case BTR_INSERT:
- btr_op= (latch_mode & BTR_IGNORE_SEC_UNIQUE)
- ? BTR_INSERT_IGNORE_UNIQUE_OP
- : BTR_INSERT_OP;
- break;
- case BTR_DELETE:
- btr_op= BTR_DELETE_OP;
- ut_a(purge_node);
- break;
- case BTR_DELETE_MARK:
- btr_op= BTR_DELMARK_OP;
- break;
- }
-
- /* Operations on the insert buffer tree cannot be buffered. */
- ut_ad(btr_op == BTR_NO_OP || !index()->is_ibuf());
- /* Operations on the clustered index cannot be buffered. */
- ut_ad(btr_op == BTR_NO_OP || !index()->is_clust());
- /* Operations on the temporary table(indexes) cannot be buffered. */
- ut_ad(btr_op == BTR_NO_OP || !index()->table->is_temporary());
-
const bool latch_by_caller= latch_mode & BTR_ALREADY_S_LATCHED;
lock_intention= btr_cur_get_and_clear_intention(&latch_mode);
latch_mode= BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode);
@@ -1016,7 +974,7 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
# ifdef UNIV_SEARCH_PERF_STAT
info->n_searches++;
# endif
- bool ahi_enabled= btr_search_enabled && !index()->is_ibuf();
+ bool ahi_enabled= btr_search_enabled;
/* We do a dirty read of btr_search_enabled below,
and btr_search_guess_on_hash() will have to check it again. */
if (!ahi_enabled);
@@ -1094,84 +1052,19 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
up_bytes= 0;
low_match= 0;
low_bytes= 0;
- ulint buf_mode= BUF_GET;
search_loop:
dberr_t err;
auto block_savepoint= mtr->get_savepoint();
buf_block_t *block=
- buf_page_get_gen(page_id, zip_size, rw_latch, guess, buf_mode, mtr,
- &err, height == 0 && !index()->is_clust());
+ buf_page_get_gen(page_id, zip_size, rw_latch, guess, BUF_GET, mtr, &err);
if (!block)
{
- switch (err) {
- case DB_DECRYPTION_FAILED:
+ if (err == DB_DECRYPTION_FAILED)
btr_decryption_failed(*index());
- /* fall through */
- default:
- func_exit:
- if (UNIV_LIKELY_NULL(heap))
- mem_heap_free(heap);
- return err;
- case DB_SUCCESS:
- /* This must be a search to perform an insert, delete mark, or delete;
- try using the change buffer */
- ut_ad(height == 0);
- ut_ad(thr);
- break;
- }
-
- switch (btr_op) {
- default:
- MY_ASSERT_UNREACHABLE();
- break;
- case BTR_INSERT_OP:
- case BTR_INSERT_IGNORE_UNIQUE_OP:
- ut_ad(buf_mode == BUF_GET_IF_IN_POOL);
-
- if (ibuf_insert(IBUF_OP_INSERT, tuple, index(), page_id, zip_size, thr))
- {
- flag= BTR_CUR_INSERT_TO_IBUF;
- goto func_exit;
- }
- break;
-
- case BTR_DELMARK_OP:
- ut_ad(buf_mode == BUF_GET_IF_IN_POOL);
-
- if (ibuf_insert(IBUF_OP_DELETE_MARK, tuple,
- index(), page_id, zip_size, thr))
- {
- flag = BTR_CUR_DEL_MARK_IBUF;
- goto func_exit;
- }
-
- break;
-
- case BTR_DELETE_OP:
- ut_ad(buf_mode == BUF_GET_IF_IN_POOL_OR_WATCH);
- auto& chain = buf_pool.page_hash.cell_get(page_id.fold());
-
- if (!row_purge_poss_sec(purge_node, index(), tuple))
- /* The record cannot be purged yet. */
- flag= BTR_CUR_DELETE_REF;
- else if (ibuf_insert(IBUF_OP_DELETE, tuple, index(),
- page_id, zip_size, thr))
- /* The purge was buffered. */
- flag= BTR_CUR_DELETE_IBUF;
- else
- {
- /* The purge could not be buffered. */
- buf_pool.watch_unset(page_id, chain);
- break;
- }
-
- buf_pool.watch_unset(page_id, chain);
- goto func_exit;
- }
-
- /* Change buffering did not succeed, we must read the page. */
- buf_mode= BUF_GET;
- goto search_loop;
+ func_exit:
+ if (UNIV_LIKELY_NULL(heap))
+ mem_heap_free(heap);
+ return err;
}
if (!!page_is_comp(block->page.frame) != index()->table->not_redundant() ||
@@ -1303,22 +1196,18 @@ dberr_t btr_cur_t::search_leaf(const dtuple_t *tuple, page_cur_mode_t mode,
rw_latch= rw_lock_type_t(latch_mode & (RW_X_LATCH | RW_S_LATCH));
if (page_has_prev(block->page.frame) &&
!btr_block_get(*index(), btr_page_get_prev(block->page.frame),
- rw_latch, false, mtr, &err))
+ rw_latch, mtr, &err))
goto func_exit;
mtr->upgrade_buffer_fix(block_savepoint, rw_latch);
if (page_has_next(block->page.frame) &&
!btr_block_get(*index(), btr_page_get_next(block->page.frame),
- rw_latch, false, mtr, &err))
+ rw_latch, mtr, &err))
goto func_exit;
}
goto release_tree;
case BTR_SEARCH_LEAF:
case BTR_MODIFY_LEAF:
- if (rw_latch == RW_NO_LATCH)
- {
- ut_ad(index()->is_ibuf());
- mtr->upgrade_buffer_fix(block_savepoint, rw_lock_type_t(latch_mode));
- }
+ ut_ad(rw_latch == rw_lock_type_t(latch_mode));
if (!latch_by_caller)
{
release_tree:
@@ -1336,12 +1225,12 @@ release_tree:
/* x-latch also siblings from left to right */
if (page_has_prev(block->page.frame) &&
!btr_block_get(*index(), btr_page_get_prev(block->page.frame),
- RW_X_LATCH, false, mtr, &err))
+ RW_X_LATCH, mtr, &err))
goto func_exit;
mtr->upgrade_buffer_fix(block_savepoint, RW_X_LATCH);
if (page_has_next(block->page.frame) &&
!btr_block_get(*index(), btr_page_get_next(block->page.frame),
- RW_X_LATCH, false, mtr, &err))
+ RW_X_LATCH, mtr, &err))
goto func_exit;
if (btr_cur_need_opposite_intention(block->page.frame, lock_intention,
node_ptr_max_size, compress_limit,
@@ -1476,7 +1365,7 @@ release_tree:
case BTR_MODIFY_ROOT_AND_LEAF:
rw_latch= RW_X_LATCH;
break;
- case BTR_MODIFY_PREV: /* ibuf_insert() or btr_pcur_move_to_prev() */
+ case BTR_MODIFY_PREV: /* btr_pcur_move_to_prev() */
case BTR_SEARCH_PREV: /* btr_pcur_move_to_prev() */
ut_ad(rw_latch == RW_S_LATCH || rw_latch == RW_X_LATCH);
@@ -1488,7 +1377,7 @@ release_tree:
of the current page. */
buf_block_t *left= btr_block_get(*index(),
btr_page_get_prev(block->page.frame),
- RW_NO_LATCH, false, mtr, &err);
+ RW_NO_LATCH, mtr, &err);
if (UNIV_UNLIKELY(!left))
goto func_exit;
ut_ad(block_savepoint + 2 == mtr->get_savepoint());
@@ -1520,16 +1409,7 @@ release_tree:
goto leaf_with_no_latch;
case BTR_MODIFY_LEAF:
case BTR_SEARCH_LEAF:
- if (index()->is_ibuf())
- goto leaf_with_no_latch;
rw_latch= rw_lock_type_t(latch_mode);
- if (btr_op != BTR_NO_OP &&
- ibuf_should_try(index(), btr_op != BTR_INSERT_OP))
- /* Try to buffer the operation if the leaf page
- is not in the buffer pool. */
- buf_mode= btr_op == BTR_DELETE_OP
- ? BUF_GET_IF_IN_POOL_OR_WATCH
- : BUF_GET_IF_IN_POOL;
break;
case BTR_MODIFY_TREE:
ut_ad(rw_latch == RW_X_LATCH);
@@ -1568,8 +1448,7 @@ ATTRIBUTE_COLD
dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
page_cur_mode_t mode, mtr_t *mtr)
{
- ut_ad(index()->is_btree() || index()->is_ibuf());
- ut_ad(!index()->is_ibuf() || ibuf_inside(mtr));
+ ut_ad(index()->is_btree());
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
@@ -1649,7 +1528,7 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
const auto block_savepoint= mtr->get_savepoint();
block=
buf_page_get_gen(page_id, block->zip_size(), RW_NO_LATCH, nullptr, BUF_GET,
- mtr, &err, !--height && !index()->is_clust());
+ mtr, &err);
if (!block)
{
@@ -1664,12 +1543,12 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
!fil_page_index_page_check(block->page.frame))
goto corrupted;
- if (height != btr_page_get_level(block->page.frame))
+ if (--height != btr_page_get_level(block->page.frame))
goto corrupted;
if (page_has_prev(block->page.frame) &&
!btr_block_get(*index(), btr_page_get_prev(block->page.frame),
- RW_X_LATCH, false, mtr, &err))
+ RW_X_LATCH, mtr, &err))
goto func_exit;
mtr->upgrade_buffer_fix(block_savepoint, RW_X_LATCH);
#ifdef UNIV_ZIP_DEBUG
@@ -1678,7 +1557,7 @@ dberr_t btr_cur_t::pessimistic_search_leaf(const dtuple_t *tuple,
#endif /* UNIV_ZIP_DEBUG */
if (page_has_next(block->page.frame) &&
!btr_block_get(*index(), btr_page_get_next(block->page.frame),
- RW_X_LATCH, false, mtr, &err))
+ RW_X_LATCH, mtr, &err))
goto func_exit;
goto search_loop;
}
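Note: in search_leaf() and pessimistic_search_leaf() above, the buf_page_get_gen() calls drop both the change-buffer fallback modes (BUF_GET_IF_IN_POOL, BUF_GET_IF_IN_POOL_OR_WATCH) and the final "allow ibuf merge" argument, so a page that cannot be fetched is now simply an error. A minimal sketch of the simplified fetch path (toy names, not the real API):

#include <iostream>
#include <optional>

enum class err_t { success, decryption_failed, corruption };

static std::optional<int> fetch_page(int page_no, err_t& e)
{
  if (page_no < 0) { e = err_t::corruption; return std::nullopt; }
  e = err_t::success;
  return page_no;                      // pretend this is a buffer-fixed block
}

static err_t search_leaf(int page_no)
{
  err_t e;
  auto block = fetch_page(page_no, e);
  if (!block)
    return e;                          // no ibuf_insert() retry any more
  std::cout << "descending into page " << *block << '\n';
  return err_t::success;
}

int main()
{
  search_leaf(3);
  std::cout << (search_leaf(-1) == err_t::corruption ? "error path\n" : "?\n");
  return 0;
}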
@@ -1708,14 +1587,14 @@ dberr_t btr_cur_search_to_nth_level(ulint level,
{
dict_index_t *const index= cursor->index();
- ut_ad(index->is_btree() || index->is_ibuf());
+ ut_ad(index->is_btree());
mem_heap_t *heap= nullptr;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs *offsets= offsets_;
rec_offs_init(offsets_);
ut_ad(level);
ut_ad(dict_index_check_search_tuple(index, tuple));
- ut_ad(index->is_ibuf() ? ibuf_inside(mtr) : index->is_btree());
+ ut_ad(index->is_btree());
ut_ad(dtuple_check_typed(tuple));
ut_ad(index->page != FIL_NULL);
@@ -1860,7 +1739,7 @@ dberr_t btr_cur_t::open_leaf(bool first, dict_index_t *index,
/* This function doesn't need to lock left page of the leaf page */
static_assert(int{BTR_SEARCH_PREV} == (4 | BTR_SEARCH_LEAF), "");
static_assert(int{BTR_MODIFY_PREV} == (4 | BTR_MODIFY_LEAF), "");
- latch_mode= btr_latch_mode(latch_mode & ~4);
+ latch_mode= btr_latch_mode(latch_mode & (RW_S_LATCH | RW_X_LATCH));
ut_ad(!latch_by_caller ||
mtr->memo_contains_flagged(&index->lock,
MTR_MEMO_SX_LOCK | MTR_MEMO_S_LOCK));
@@ -1891,9 +1770,7 @@ index_locked:
const rw_lock_type_t rw_latch= height && latch_mode != BTR_MODIFY_TREE
? upper_rw_latch
: RW_NO_LATCH;
- buf_block_t* block=
- btr_block_get(*index, page, rw_latch, !height && !index->is_clust(), mtr,
- &err);
+ buf_block_t* block= btr_block_get(*index, page, rw_latch, mtr, &err);
ut_ad(!block == (err != DB_SUCCESS));
@@ -1940,12 +1817,12 @@ index_locked:
/* x-latch also siblings from left to right */
if (page_has_prev(block->page.frame) &&
!btr_block_get(*index, btr_page_get_prev(block->page.frame),
- RW_X_LATCH, false, mtr, &err))
+ RW_X_LATCH, mtr, &err))
break;
mtr->upgrade_buffer_fix(leaf_savepoint - 1, RW_X_LATCH);
if (page_has_next(block->page.frame) &&
!btr_block_get(*index, btr_page_get_next(block->page.frame),
- RW_X_LATCH, false, mtr, &err))
+ RW_X_LATCH, mtr, &err))
break;
if (!index->lock.have_x() &&
@@ -2065,11 +1942,6 @@ be freed by reorganizing. Differs from btr_cur_optimistic_insert because
no heuristics is applied to whether it pays to use CPU time for
reorganizing the page or not.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to inserted record if succeed, else NULL */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
rec_t*
@@ -2238,9 +2110,6 @@ static void btr_cur_prefetch_siblings(const buf_block_t *block,
{
ut_ad(page_is_leaf(block->page.frame));
- if (index->is_ibuf())
- return;
-
const page_t *page= block->page.frame;
uint32_t prev= mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_PREV));
uint32_t next= mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_NEXT));
@@ -2475,14 +2344,6 @@ fail_err:
if (*rec) {
} else if (block->page.zip.data) {
ut_ad(!index->table->is_temporary());
- /* Reset the IBUF_BITMAP_FREE bits, because
- page_cur_tuple_insert() will have attempted page
- reorganize before failing. */
- if (leaf
- && !dict_index_is_clust(index)) {
- ibuf_reset_free_bits(block);
- }
-
goto fail;
} else {
ut_ad(!reorg);
@@ -2523,34 +2384,6 @@ fail_err:
lock_update_insert(block, *rec);
}
- if (leaf
- && !dict_index_is_clust(index)
- && !index->table->is_temporary()) {
- /* Update the free bits of the B-tree page in the
- insert buffer bitmap. */
-
- /* The free bits in the insert buffer bitmap must
- never exceed the free space on a page. It is safe to
- decrement or reset the bits in the bitmap in a
- mini-transaction that is committed before the
- mini-transaction that affects the free space. */
-
- /* It is unsafe to increment the bits in a separately
- committed mini-transaction, because in crash recovery,
- the free bits could momentarily be set too high. */
-
- if (block->page.zip.data) {
- /* Update the bits in the same mini-transaction. */
- ibuf_update_free_bits_zip(block, mtr);
- } else {
- /* Decrement the bits in a separate
- mini-transaction. */
- ibuf_update_free_bits_if_full(
- block, max_size,
- rec_size + PAGE_DIR_SLOT_SIZE);
- }
- }
-
*big_rec = big_rec_vec;
return(DB_SUCCESS);
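Note: the comment deleted above spelled out the invariant behind the IBUF_BITMAP_FREE bookkeeping that btr_cur_optimistic_insert() no longer performs: the bitmap was allowed to underestimate the free space on a page but never to overstate it, which is why resets and decrements could go into a separately committed mini-transaction while increments could not. A toy model of that invariant:

#include <algorithm>
#include <cassert>

struct page_model {
  unsigned real_free;    // bytes actually free on the page
  unsigned bitmap_free;  // what the (now removed) bitmap advertised
  bool invariant() const { return bitmap_free <= real_free; }
};

int main()
{
  page_model p{800, 800};
  p.real_free = 300;                                     // an insert used space
  p.bitmap_free = std::min(p.bitmap_free, p.real_free);  // safe: decrement
  assert(p.invariant());
  p.bitmap_free = 0;                                     // safe: pessimistic reset
  assert(p.invariant());
  return 0;
}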
@@ -2621,12 +2454,10 @@ btr_cur_pessimistic_insert(
the index tree, so that the insert will not fail because of
lack of space */
- if (!index->is_ibuf()
- && (err = fsp_reserve_free_extents(&n_reserved, index->table->space,
- uint32_t(cursor->tree_height / 16
- + 3),
- FSP_NORMAL, mtr))
- != DB_SUCCESS) {
+ err = fsp_reserve_free_extents(&n_reserved, index->table->space,
+ uint32_t(cursor->tree_height / 16 + 3),
+ FSP_NORMAL, mtr);
+ if (err != DB_SUCCESS) {
return err;
}
@@ -2658,11 +2489,21 @@ btr_cur_pessimistic_insert(
}
}
- *rec = index->page == btr_cur_get_block(cursor)->page.id().page_no()
- ? btr_root_raise_and_insert(flags, cursor, offsets, heap,
- entry, n_ext, mtr, &err)
- : btr_page_split_and_insert(flags, cursor, offsets, heap,
- entry, n_ext, mtr, &err);
+ if (index->page == btr_cur_get_block(cursor)->page.id().page_no()) {
+ *rec = index->is_spatial()
+ ? rtr_root_raise_and_insert(flags, cursor, offsets,
+ heap, entry, n_ext, mtr,
+ &err, thr)
+ : btr_root_raise_and_insert(flags, cursor, offsets,
+ heap, entry, n_ext, mtr,
+ &err);
+ } else if (index->is_spatial()) {
+ *rec = rtr_page_split_and_insert(flags, cursor, offsets, heap,
+ entry, n_ext, mtr, &err, thr);
+ } else {
+ *rec = btr_page_split_and_insert(flags, cursor, offsets, heap,
+ entry, n_ext, mtr, &err);
+ }
if (!*rec) {
goto func_exit;
@@ -2906,14 +2747,8 @@ static dberr_t btr_cur_upd_rec_sys(buf_block_t *block, rec_t *rec,
See if there is enough place in the page modification log to log
an update-in-place.
-@retval false if out of space; IBUF_BITMAP_FREE will be reset
-outside mtr if the page was recompressed
-@retval true if enough place;
-
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE if this is
-a secondary index leaf page. This has to be done either within the
-same mini-transaction, or by invoking ibuf_reset_free_bits() before
-mtr_commit(mtr). */
+@retval false if out of space
+@retval true if enough place */
bool
btr_cur_update_alloc_zip_func(
/*==========================*/
@@ -2934,7 +2769,6 @@ btr_cur_update_alloc_zip_func(
const page_t* page = page_cur_get_page(cursor);
ut_ad(page_zip == page_cur_get_page_zip(cursor));
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(rec_offs_validate(page_cur_get_rec(cursor), index, offsets));
if (page_zip_available(page_zip, dict_index_is_clust(index),
@@ -2958,26 +2792,8 @@ btr_cur_update_alloc_zip_func(
rec_offs_make_valid(page_cur_get_rec(cursor), index,
page_is_leaf(page), offsets);
- /* After recompressing a page, we must make sure that the free
- bits in the insert buffer bitmap will not exceed the free
- space on the page. Because this function will not attempt
- recompression unless page_zip_available() fails above, it is
- safe to reset the free bits if page_zip_available() fails
- again, below. The free bits can safely be reset in a separate
- mini-transaction. If page_zip_available() succeeds below, we
- can be sure that the btr_page_reorganize() above did not reduce
- the free space available on the page. */
-
- if (page_zip_available(page_zip, dict_index_is_clust(index),
- length, create)) {
- return true;
- }
- }
-
- if (!dict_index_is_clust(index)
- && !index->table->is_temporary()
- && page_is_leaf(page)) {
- ibuf_reset_free_bits(page_cur_get_block(cursor));
+ return page_zip_available(page_zip, dict_index_is_clust(index),
+ length, create);
}
return(false);
@@ -3126,7 +2942,7 @@ We assume here that the ordering fields of the record do not change.
@return locking or undo log related error code, or
@retval DB_SUCCESS on success
@retval DB_ZIP_OVERFLOW if there is not enough space left
-on the compressed page (IBUF_BITMAP_FREE was reset outside mtr) */
+on a ROW_FORMAT=COMPRESSED page */
dberr_t
btr_cur_update_in_place(
/*====================*/
@@ -3146,7 +2962,6 @@ btr_cur_update_in_place(
further pages */
{
dict_index_t* index;
- dberr_t err;
rec_t* rec;
roll_ptr_t roll_ptr = 0;
ulint was_delete_marked;
@@ -3154,17 +2969,14 @@ btr_cur_update_in_place(
ut_ad(page_is_leaf(cursor->page_cur.block->page.frame));
rec = btr_cur_get_rec(cursor);
index = cursor->index();
- ut_ad(!index->is_ibuf());
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
|| index->table->is_temporary());
- /* The insert buffer tree should never be updated in place. */
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
- || dict_index_is_clust(index));
+ || index->is_primary());
ut_ad(thr_get_trx(thr)->id == trx_id
- || (flags & ulint(~(BTR_KEEP_POS_FLAG | BTR_KEEP_IBUF_BITMAP)))
+ || (flags & ulint(~BTR_KEEP_POS_FLAG))
== (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
| BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG));
ut_ad(fil_page_index_page_check(btr_cur_get_page(cursor)));
@@ -3194,22 +3006,17 @@ btr_cur_update_in_place(
}
/* Do lock checking and undo logging */
- err = btr_cur_upd_lock_and_undo(flags, cursor, offsets,
- update, cmpl_info,
- thr, mtr, &roll_ptr);
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- /* We may need to update the IBUF_BITMAP_FREE
- bits after a reorganize that was done in
- btr_cur_update_alloc_zip(). */
- goto func_exit;
+ if (dberr_t err = btr_cur_upd_lock_and_undo(flags, cursor, offsets,
+ update, cmpl_info,
+ thr, mtr, &roll_ptr)) {
+ return err;
}
- if (!(flags & BTR_KEEP_SYS_FLAG)) {
- err = btr_cur_upd_rec_sys(block, rec, index, offsets,
- thr_get_trx(thr), roll_ptr, mtr);
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- goto func_exit;
- }
+ if (flags & BTR_KEEP_SYS_FLAG) {
+ } else if (dberr_t err = btr_cur_upd_rec_sys(block, rec, index, offsets,
+ thr_get_trx(thr),
+ roll_ptr, mtr)) {
+ return err;
}
was_delete_marked = rec_get_deleted_flag(
@@ -3267,19 +3074,7 @@ btr_cur_update_in_place(
btr_cur_unmark_extern_fields(block, rec, index, offsets, mtr);
}
- ut_ad(err == DB_SUCCESS);
-
-func_exit:
- if (page_zip
- && !(flags & BTR_KEEP_IBUF_BITMAP)
- && !dict_index_is_clust(index)
- && page_is_leaf(buf_block_get_frame(block))) {
- /* Update the free bits in the insert buffer. */
- ut_ad(!index->table->is_temporary());
- ibuf_update_free_bits_zip(block, mtr);
- }
-
- return(err);
+ return DB_SUCCESS;
}
/** Trim a metadata record during the rollback of instant ALTER TABLE.
@@ -3423,7 +3218,7 @@ fields of the record do not change.
@retval DB_OVERFLOW if the updated record does not fit
@retval DB_UNDERFLOW if the page would become too empty
@retval DB_ZIP_OVERFLOW if there is not enough space left
-on the compressed page (IBUF_BITMAP_FREE was reset outside mtr) */
+on a ROW_FORMAT=COMPRESSED page */
dberr_t
btr_cur_optimistic_update(
/*======================*/
@@ -3454,7 +3249,6 @@ btr_cur_optimistic_update(
ulint max_size;
ulint new_rec_size;
ulint old_rec_size;
- ulint max_ins_size = 0;
dtuple_t* new_entry;
roll_ptr_t roll_ptr;
ulint i;
@@ -3463,19 +3257,16 @@ btr_cur_optimistic_update(
page = buf_block_get_frame(block);
rec = btr_cur_get_rec(cursor);
index = cursor->index();
- ut_ad(index->has_locking());
ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
|| index->table->is_temporary());
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
/* This is intended only for leaf page updates */
ut_ad(page_is_leaf(page));
- /* The insert buffer tree should never be updated in place. */
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
|| dict_index_is_clust(index));
ut_ad(thr_get_trx(thr)->id == trx_id
- || (flags & ulint(~(BTR_KEEP_POS_FLAG | BTR_KEEP_IBUF_BITMAP)))
+ || (flags & ulint(~BTR_KEEP_POS_FLAG))
== (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
| BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG));
ut_ad(fil_page_index_page_check(page));
@@ -3504,7 +3295,6 @@ btr_cur_optimistic_update(
if (rec_offs_any_extern(*offsets)) {
any_extern:
- ut_ad(!index->is_ibuf());
/* Externally stored fields are treated in pessimistic
update */
@@ -3585,9 +3375,6 @@ any_extern:
if (UNIV_UNLIKELY(new_rec_size
>= (page_get_free_space_of_empty(page_is_comp(page))
/ 2))) {
- /* We may need to update the IBUF_BITMAP_FREE
- bits after a reorganize that was done in
- btr_cur_update_alloc_zip(). */
err = DB_OVERFLOW;
goto func_exit;
}
@@ -3595,10 +3382,6 @@ any_extern:
if (UNIV_UNLIKELY(page_get_data_size(page)
- old_rec_size + new_rec_size
< BTR_CUR_PAGE_COMPRESS_LIMIT(index))) {
- /* We may need to update the IBUF_BITMAP_FREE
- bits after a reorganize that was done in
- btr_cur_update_alloc_zip(). */
-
/* The page would become too empty */
err = DB_UNDERFLOW;
goto func_exit;
@@ -3611,19 +3394,9 @@ any_extern:
: (old_rec_size
+ page_get_max_insert_size_after_reorganize(page, 1));
- if (!page_zip) {
- max_ins_size = page_get_max_insert_size_after_reorganize(
- page, 1);
- }
-
if (!(((max_size >= BTR_CUR_PAGE_REORGANIZE_LIMIT)
&& (max_size >= new_rec_size))
|| (page_get_n_recs(page) <= 1))) {
-
- /* We may need to update the IBUF_BITMAP_FREE
- bits after a reorganize that was done in
- btr_cur_update_alloc_zip(). */
-
/* There was not enough space, or it did not pay to
reorganize: for simplicity, we decide what to do assuming a
reorganization is needed, though it might not be necessary */
@@ -3637,9 +3410,6 @@ any_extern:
update, cmpl_info,
thr, mtr, &roll_ptr);
if (err != DB_SUCCESS) {
- /* We may need to update the IBUF_BITMAP_FREE
- bits after a reorganize that was done in
- btr_cur_update_alloc_zip(). */
goto func_exit;
}
@@ -3695,22 +3465,11 @@ any_extern:
ut_ad(err == DB_SUCCESS);
if (!page_cur_move_to_next(page_cursor)) {
corrupted:
- err = DB_CORRUPTION;
- }
-
-func_exit:
- if (!(flags & BTR_KEEP_IBUF_BITMAP)
- && !dict_index_is_clust(index)) {
- /* Update the free bits in the insert buffer. */
- if (page_zip) {
- ut_ad(!index->table->is_temporary());
- ibuf_update_free_bits_zip(block, mtr);
- } else if (!index->table->is_temporary()) {
- ibuf_update_free_bits_low(block, max_ins_size, mtr);
- }
+ return DB_CORRUPTION;
}
if (err != DB_SUCCESS) {
+func_exit:
/* prefetch siblings of the leaf for the pessimistic
operation. */
btr_cur_prefetch_siblings(block, index);
@@ -3807,7 +3566,6 @@ btr_cur_pessimistic_update(
big_rec_t* dummy_big_rec;
dict_index_t* index;
buf_block_t* block;
- page_zip_des_t* page_zip;
rec_t* rec;
page_cur_t* page_cursor;
dberr_t err;
@@ -3820,20 +3578,19 @@ btr_cur_pessimistic_update(
*big_rec = NULL;
block = btr_cur_get_block(cursor);
- page_zip = buf_block_get_page_zip(block);
index = cursor->index();
- ut_ad(index->has_locking());
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK |
MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
+#if defined UNIV_ZIP_DEBUG || defined UNIV_DEBUG
+ page_zip_des_t* page_zip = buf_block_get_page_zip(block);
+#endif
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip
|| page_zip_validate(page_zip, block->page.frame, index));
#endif /* UNIV_ZIP_DEBUG */
ut_ad(!page_zip || !index->table->is_temporary());
- /* The insert buffer tree should never be updated in place. */
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)
|| index->table->is_temporary());
ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
@@ -3844,7 +3601,7 @@ btr_cur_pessimistic_update(
| BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG));
err = optim_err = btr_cur_optimistic_update(
- flags | BTR_KEEP_IBUF_BITMAP,
+ flags,
cursor, offsets, offsets_heap, update,
cmpl_info, thr, trx_id, mtr);
@@ -3855,18 +3612,6 @@ btr_cur_pessimistic_update(
break;
default:
err_exit:
- /* We suppressed this with BTR_KEEP_IBUF_BITMAP.
- For DB_ZIP_OVERFLOW, the IBUF_BITMAP_FREE bits were
- already reset by btr_cur_update_alloc_zip() if the
- page was recompressed. */
- if (page_zip
- && optim_err != DB_ZIP_OVERFLOW
- && !dict_index_is_clust(index)
- && page_is_leaf(block->page.frame)) {
- ut_ad(!index->table->is_temporary());
- ibuf_update_free_bits_zip(block, mtr);
- }
-
if (big_rec_vec != NULL) {
dtuple_big_rec_free(big_rec_vec);
}
@@ -3944,11 +3689,6 @@ btr_cur_pessimistic_update(
index->first_user_field())))) {
big_rec_vec = dtuple_convert_big_rec(index, update, new_entry, &n_ext);
if (UNIV_UNLIKELY(big_rec_vec == NULL)) {
-
- /* We cannot goto return_after_reservations,
- because we may need to update the
- IBUF_BITMAP_FREE bits, which was suppressed by
- BTR_KEEP_IBUF_BITMAP. */
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip
|| page_zip_validate(page_zip, block->page.frame,
@@ -3993,11 +3733,6 @@ btr_cur_pessimistic_update(
btr_cur_write_sys(new_entry, index, trx_id, roll_ptr);
}
- const ulint max_ins_size = page_zip
- ? 0
- : page_get_max_insert_size_after_reorganize(block->page.frame,
- 1);
-
if (UNIV_UNLIKELY(is_metadata)) {
ut_ad(new_entry->is_metadata());
ut_ad(index->is_instant());
@@ -4082,18 +3817,6 @@ btr_cur_pessimistic_update(
rec_offs_make_valid(page_cursor->rec, index,
true, *offsets);
}
- } else if (!dict_index_is_clust(index)
- && page_is_leaf(block->page.frame)) {
- /* Update the free bits in the insert buffer.
- This is the same block which was skipped by
- BTR_KEEP_IBUF_BITMAP. */
- if (page_zip) {
- ut_ad(!index->table->is_temporary());
- ibuf_update_free_bits_zip(block, mtr);
- } else if (!index->table->is_temporary()) {
- ibuf_update_free_bits_low(block, max_ins_size,
- mtr);
- }
}
#if 0 // FIXME: this used to be a no-op, and will cause trouble if enabled
@@ -4114,16 +3837,7 @@ btr_cur_pessimistic_update(
of a badly-compressing record, it is possible for
btr_cur_optimistic_update() to return DB_UNDERFLOW and
btr_cur_insert_if_possible() to return FALSE. */
- ut_a(page_zip || optim_err != DB_UNDERFLOW);
-
- /* Out of space: reset the free bits.
- This is the same block which was skipped by
- BTR_KEEP_IBUF_BITMAP. */
- if (!dict_index_is_clust(index)
- && !index->table->is_temporary()
- && page_is_leaf(block->page.frame)) {
- ibuf_reset_free_bits(block);
- }
+ ut_ad(page_zip || optim_err != DB_UNDERFLOW);
}
if (big_rec_vec != NULL) {
@@ -4168,8 +3882,7 @@ btr_cur_pessimistic_update(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
- if (dict_index_is_sec_or_ibuf(index)
- && !index->table->is_temporary()) {
+ if (!index->is_primary() && !index->table->is_temporary()) {
/* Update PAGE_MAX_TRX_ID in the index page header.
It was not updated by btr_cur_pessimistic_insert()
because of BTR_NO_LOCKING_FLAG. */
@@ -4480,9 +4193,6 @@ btr_cur_optimistic_delete(
}
{
- page_t* page = buf_block_get_frame(block);
- page_zip_des_t* page_zip= buf_block_get_page_zip(block);
-
if (UNIV_UNLIKELY(rec_get_info_bits(rec, page_rec_is_comp(rec))
& REC_INFO_MIN_REC_FLAG)) {
/* This should be rolling back instant ADD COLUMN.
@@ -4491,7 +4201,7 @@ btr_cur_optimistic_delete(
insert into SYS_COLUMNS is rolled back. */
ut_ad(cursor->index()->table->supports_instant());
ut_ad(cursor->index()->is_primary());
- ut_ad(!page_zip);
+ ut_ad(!buf_block_get_page_zip(block));
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
offsets, mtr);
/* We must empty the PAGE_FREE list, because
@@ -4509,40 +4219,8 @@ btr_cur_optimistic_delete(
btr_search_update_hash_on_delete(cursor);
}
- if (page_zip) {
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page,
- cursor->index()));
-#endif /* UNIV_ZIP_DEBUG */
- page_cur_delete_rec(btr_cur_get_page_cur(cursor),
- offsets, mtr);
-#ifdef UNIV_ZIP_DEBUG
- ut_a(page_zip_validate(page_zip, page,
- cursor->index()));
-#endif /* UNIV_ZIP_DEBUG */
-
- /* On compressed pages, the IBUF_BITMAP_FREE
- space is not affected by deleting (purging)
- records, because it is defined as the minimum
- of space available *without* reorganize, and
- space available in the modification log. */
- } else {
- const ulint max_ins
- = page_get_max_insert_size_after_reorganize(
- page, 1);
-
- page_cur_delete_rec(btr_cur_get_page_cur(cursor),
- offsets, mtr);
-
- /* The change buffer does not handle inserts
- into non-leaf pages, into clustered indexes,
- or into the change buffer. */
- if (!cursor->index()->is_clust()
- && !cursor->index()->table->is_temporary()
- && !dict_index_is_ibuf(cursor->index())) {
- ibuf_update_free_bits_low(block, max_ins, mtr);
- }
- }
+ page_cur_delete_rec(btr_cur_get_page_cur(cursor),
+ offsets, mtr);
}
func_exit:
@@ -4738,9 +4416,9 @@ discard_page:
goto err_exit;
}
- btr_cur_t cursor;
- cursor.page_cur.index = index;
- cursor.page_cur.block = block;
+ btr_cur_t cur;
+ cur.page_cur.index = index;
+ cur.page_cur.block = block;
if (!page_has_prev(page)) {
/* If we delete the leftmost node pointer on a
@@ -4756,16 +4434,17 @@ discard_page:
rec_offs* offsets;
ulint len;
- rtr_page_get_father_block(NULL, heap, mtr, NULL,
- &cursor);
- father_rec = btr_cur_get_rec(&cursor);
+ rtr_page_get_father_block(nullptr, heap, nullptr,
+ &cur,
+ cursor->rtr_info->thr, mtr);
+ father_rec = btr_cur_get_rec(&cur);
offsets = rec_get_offsets(father_rec, index, NULL,
0, ULINT_UNDEFINED, &heap);
rtr_read_mbr(rec_get_nth_field(
father_rec, offsets, 0, &len), &father_mbr);
- rtr_update_mbr_field(&cursor, offsets, NULL,
+ rtr_update_mbr_field(&cur, offsets, NULL,
page, &father_mbr, next_rec, mtr);
ut_d(parent_latched = true);
} else {
@@ -4773,12 +4452,12 @@ discard_page:
on a page, we have to change the parent node pointer
so that it is equal to the new leftmost node pointer
on the page */
- ret = btr_page_get_father(mtr, &cursor);
+ ret = btr_page_get_father(mtr, &cur);
if (!ret) {
*err = DB_CORRUPTION;
goto err_exit;
}
- *err = btr_cur_node_ptr_delete(&cursor, mtr);
+ *err = btr_cur_node_ptr_delete(&cur, mtr);
if (*err != DB_SUCCESS) {
got_err:
ret = FALSE;
@@ -4825,7 +4504,10 @@ got_err:
#endif /* UNIV_ZIP_DEBUG */
ut_ad(!parent_latched
- || btr_check_node_ptr(index, block, mtr));
+ || btr_check_node_ptr(index, block,
+ cursor->rtr_info
+ ? cursor->rtr_info->thr
+ : nullptr, mtr));
if (!ret && btr_cur_compress_recommendation(cursor, mtr)) {
if (UNIV_LIKELY(allow_merge)) {
@@ -4970,7 +4652,7 @@ public:
buf_block_t *parent_block= m_block;
ulint parent_savepoint= m_savepoint;
- m_block= btr_block_get(*index(), m_page_id.page_no(), RW_S_LATCH, !level,
+ m_block= btr_block_get(*index(), m_page_id.page_no(), RW_S_LATCH,
&mtr, nullptr);
if (!m_block)
return false;
@@ -5191,8 +4873,7 @@ static ha_rows btr_estimate_n_rows_in_range_on_level(
savepoint= mtr.get_savepoint();
/* Fetch the page. */
- block= btr_block_get(*index, page_id.page_no(), RW_S_LATCH, !level, &mtr,
- nullptr);
+ block= btr_block_get(*index, page_id.page_no(), RW_S_LATCH, &mtr, nullptr);
if (prev_block)
{
@@ -5502,6 +5183,7 @@ search_loop:
DBUG_EXECUTE_IF("bug14007649", DBUG_RETURN(n_rows););
+#ifdef NOT_USED
/* Do not estimate the number of rows in the range to over 1 / 2 of the
estimated rows in the whole table */
@@ -5516,6 +5198,10 @@ search_loop:
if (n_rows == 0)
n_rows= table_n_rows;
}
+#else
+ if (n_rows > table_n_rows)
+ n_rows= table_n_rows;
+#endif
DBUG_RETURN(n_rows);
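Note: the #ifdef NOT_USED hunk above keeps the old rule (do not let a range estimate exceed half of the estimated table rows) only as dead code; the active branch is a plain upper bound, capping the estimate at the table size. A tiny sketch of the new clamp:

#include <algorithm>
#include <cstdint>
#include <iostream>

static uint64_t clamp_estimate(uint64_t n_rows, uint64_t table_n_rows)
{
  return std::min(n_rows, table_n_rows);   // cap at the whole table only
}

int main()
{
  std::cout << clamp_estimate(1500, 1000) << '\n'   // 1000
            << clamp_estimate(200, 1000)  << '\n';  // 200 kept as-is
  return 0;
}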
@@ -5820,7 +5506,7 @@ struct btr_blob_log_check_t {
m_mtr, &err));
}
m_pcur->btr_cur.page_cur.block = btr_block_get(
- *index, page_no, RW_X_LATCH, false, m_mtr);
+ *index, page_no, RW_X_LATCH, m_mtr);
/* The page should not be evicted or corrupted while
we are holding a buffer-fix on it. */
m_pcur->btr_cur.page_cur.block->page.unfix();
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index 642db0e9f1c..2f0b167f655 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -33,7 +33,6 @@ Modified 30/07/2014 Jan Lindström jan.lindstrom@mariadb.com
#include "dict0stats.h"
#include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
-#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "srv0start.h"
#include "mysqld.h"
@@ -394,20 +393,16 @@ btr_defragment_merge_pages(
// If max_ins_size >= move_size, we can move the records without
// reorganizing the page, otherwise we need to reorganize the page
// first to release more space.
- if (move_size > max_ins_size) {
- dberr_t err = btr_page_reorganize_block(page_zip_level,
- to_block, index, mtr);
- if (err != DB_SUCCESS) {
- if (!dict_index_is_clust(index)
- && page_is_leaf(to_page)) {
- ibuf_reset_free_bits(to_block);
- }
- // If reorganization fails, that means the page is
- // not compressible. There's no point in trying to
- // merge into this page. Continue to the
- // next page.
- return err == DB_FAIL ? from_block : nullptr;
- }
+ if (move_size <= max_ins_size) {
+ } else if (dberr_t err = btr_page_reorganize_block(page_zip_level,
+ to_block, index,
+ mtr)) {
+ // If reorganization fails, that means the page is
+ // not compressible. There's no point in trying to
+ // merge into this page. Continue to the
+ // next page.
+ return err == DB_FAIL ? from_block : nullptr;
+ } else {
ut_ad(page_validate(to_page, index));
max_ins_size = page_get_max_insert_size(to_page, n_recs);
if (max_ins_size < move_size) {
@@ -456,18 +451,6 @@ btr_defragment_merge_pages(
&& *max_data_size > new_data_size + move_size) {
*max_data_size = new_data_size + move_size;
}
- // Set ibuf free bits if necessary.
- if (!dict_index_is_clust(index)
- && page_is_leaf(to_page)) {
- if (zip_size) {
- ibuf_reset_free_bits(to_block);
- } else {
- ibuf_update_free_bits_if_full(
- to_block,
- srv_page_size,
- ULINT_UNDEFINED);
- }
- }
btr_cur_t parent;
parent.page_cur.index = index;
parent.page_cur.block = from_block;
@@ -590,8 +573,7 @@ btr_defragment_n_pages(
break;
}
- blocks[i] = btr_block_get(*index, page_no, RW_X_LATCH, true,
- mtr);
+ blocks[i] = btr_block_get(*index, page_no, RW_X_LATCH, mtr);
if (!blocks[i]) {
return nullptr;
}
@@ -606,7 +588,7 @@ btr_defragment_n_pages(
/* given page is the last page.
Lift the records to father. */
dberr_t err;
- btr_lift_page_up(index, block, mtr, &err);
+ btr_lift_page_up(index, block, nullptr, mtr, &err);
}
return NULL;
}
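Note: the rewritten branch in btr_defragment_merge_pages() above (and similar spots in btr0cur.cc) uses the "declare the error code in the condition" idiom: else if (dberr_t err = btr_page_reorganize_block(...)) enters the branch only when the call returns a non-zero code, which presumes DB_SUCCESS compares equal to zero, as the rest of this patch also does. A standalone illustration with toy names:

#include <iostream>

enum toy_err { SUCCESS = 0, FAIL = 1, CORRUPTION = 2 };

static toy_err reorganize(bool compressible)
{ return compressible ? SUCCESS : FAIL; }

static const char* merge(bool fits, bool compressible)
{
  if (fits) {
    // enough room already: fall through and merge
  } else if (toy_err e = reorganize(compressible)) {
    // entered only when reorganize() returned something other than SUCCESS
    return e == FAIL ? "skip this page" : "corrupted";
  }
  return "merged";
}

int main()
{
  std::cout << merge(true,  false) << '\n'   // merged
            << merge(false, true)  << '\n'   // merged after reorganizing
            << merge(false, false) << '\n';  // skip this page
  return 0;
}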
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index 1dd26f8c467..c3309085c46 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -157,20 +157,14 @@ before_first:
cursor->rel_pos = BTR_PCUR_ON;
}
- if (index->is_ibuf()) {
- ut_ad(!index->table->not_redundant());
- cursor->old_n_fields = uint16_t(rec_get_n_fields_old(rec));
- } else {
- cursor->old_n_fields = static_cast<uint16>(
- dict_index_get_n_unique_in_tree(index));
- if (index->is_spatial() && !page_rec_is_leaf(rec)) {
- ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
- == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
- /* For R-tree, we have to compare
- the child page numbers as well. */
- cursor->old_n_fields
- = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
- }
+ cursor->old_n_fields = static_cast<uint16>(
+ dict_index_get_n_unique_in_tree(index));
+ if (index->is_spatial() && !page_rec_is_leaf(rec)) {
+ ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
+ == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
+ /* For R-tree, we have to compare
+ the child page numbers as well. */
+ cursor->old_n_fields = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
}
cursor->old_n_core_fields = index->n_core_fields;
@@ -541,7 +535,7 @@ btr_pcur_move_to_next_page(
dberr_t err;
buf_block_t* next_block = btr_block_get(
*cursor->index(), next_page_no, cursor->latch_mode & ~12,
- page_is_leaf(page), mtr, &err);
+ mtr, &err);
if (UNIV_UNLIKELY(!next_block)) {
return err;
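Note: after the ibuf special case is removed from btr_pcur_t::store_position() above, the number of stored fields is always derived from the index definition: the n_unique_in_tree fields, plus the child page number when the cursor sits on a non-leaf R-tree page. A small sketch of that rule; the node-pointer size is passed in rather than hard-coded, since the real constant lives in dict0mem.h, and 1 is used below only as an example value:

#include <cassert>
#include <cstdint>

static uint16_t stored_fields(uint16_t n_unique_in_tree,
                              uint16_t spatial_nodeptr_size,
                              bool spatial, bool leaf)
{
  // R-tree node pointers also compare the child page number.
  return (spatial && !leaf) ? uint16_t(spatial_nodeptr_size + 1)
                            : n_unique_in_tree;
}

int main()
{
  assert(stored_fields(3, 1, false, true) == 3);   // ordinary B-tree leaf
  assert(stored_fields(3, 1, true,  false) == 2);  // R-tree node pointer
  return 0;
}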
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 300276ff3a6..eeb39545360 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -305,13 +305,6 @@ static void btr_search_info_update_hash(btr_search_t *info, btr_cur_t *cursor)
dict_index_t* index = cursor->index();
int cmp;
- if (dict_index_is_ibuf(index)) {
- /* So many deletes are performed on an insert buffer tree
- that we do not consider a hash index useful on it: */
-
- return;
- }
-
uint16_t n_unique = dict_index_get_n_unique_in_tree(index);
if (info->n_hash_potential == 0) {
@@ -712,7 +705,6 @@ btr_search_update_hash_ref(
ut_ad(block->page.id().space() == index->table->space_id);
ut_ad(index == cursor->index());
- ut_ad(!dict_index_is_ibuf(index));
auto part = btr_search_sys.get_part(*index);
part->latch.wr_lock(SRW_LOCK_CALL);
ut_ad(!block->index || block->index == index);
@@ -1057,7 +1049,7 @@ btr_search_guess_on_hash(
index_id_t index_id;
ut_ad(mtr->is_active());
- ut_ad(index->is_btree() || index->is_ibuf());
+ ut_ad(index->is_btree());
/* Note that, for efficiency, the struct info may not be protected by
any latch here! */
@@ -1267,7 +1259,6 @@ retry:
ut_ad(block->page.id().space() == index->table->space_id);
ut_a(index_id == index->id);
- ut_ad(!dict_index_is_ibuf(index));
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
@@ -1470,7 +1461,6 @@ btr_search_build_page_hash_index(
ut_ad(ahi_latch == &btr_search_sys.get_part(*index)->latch);
ut_ad(index);
ut_ad(block->page.id().space() == index->table->space_id);
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(page_is_leaf(block->page.frame));
ut_ad(block->page.lock.have_x() || block->page.lock.have_s());
@@ -1796,7 +1786,6 @@ void btr_search_update_hash_on_delete(btr_cur_t *cursor)
ut_ad(block->page.id().space() == index->table->space_id);
ut_a(index == cursor->index());
ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0);
- ut_ad(!dict_index_is_ibuf(index));
rec = btr_cur_get_rec(cursor);
@@ -1869,7 +1858,6 @@ void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
}
ut_a(cursor->index() == index);
- ut_ad(!dict_index_is_ibuf(index));
ahi_latch->wr_lock(SRW_LOCK_CALL);
if (!block->index || !btr_search_enabled) {
@@ -1962,7 +1950,6 @@ drop:
}
ut_a(index == cursor->index());
- ut_ad(!dict_index_is_ibuf(index));
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
@@ -2211,7 +2198,6 @@ btr_search_hash_table_validate(ulint hash_table_id)
invokes btr_search_drop_page_hash_index(). */
ut_a(block->page.state() == buf_page_t::REMOVE_HASH);
state_ok:
- ut_ad(!dict_index_is_ibuf(block->index));
ut_ad(block->page.id().space()
== block->index->table->space_id);
diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc
index 85a698bc875..f43c6672a95 100644
--- a/storage/innobase/buf/buf0buddy.cc
+++ b/storage/innobase/buf/buf0buddy.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2021, MariaDB Corporation.
+Copyright (c) 2018, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -454,7 +454,7 @@ byte *buf_buddy_alloc_low(ulint i, bool *lru)
}
/* Try replacing an uncompressed page in the buffer pool. */
- block = buf_LRU_get_free_block(true);
+ block = buf_LRU_get_free_block(have_mutex);
if (lru) {
*lru = true;
}
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 5339f913496..4de8b4fd175 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -50,7 +50,6 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0dblwr.h"
#include "lock0lock.h"
#include "btr0sea.h"
-#include "ibuf0ibuf.h"
#include "trx0undo.h"
#include "trx0purge.h"
#include "log0log.h"
@@ -586,7 +585,7 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
DBUG_EXECUTE_IF(
"page_intermittent_checksum_mismatch", {
static int page_counter;
- if (page_counter++ == 3) {
+ if (page_counter++ == 6) {
crc32++;
}
});
@@ -721,7 +720,8 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
DBUG_EXECUTE_IF(
"page_intermittent_checksum_mismatch", {
static int page_counter;
- if (page_counter++ == 3) return true;
+ if (page_counter++ == 6)
+ return true;
});
if ((checksum_field1 != crc32
@@ -1856,9 +1856,6 @@ calc_buf_pool_size:
" and dictionary.";
}
- /* normalize ibuf.max_size */
- ibuf_max_size_update(srv_change_buffer_max_size);
-
if (srv_buf_pool_old_size != srv_buf_pool_size) {
buf_resize_status("Completed resizing buffer pool from %zu to %zu bytes."
@@ -1932,7 +1929,6 @@ static void buf_relocate(buf_page_t *bpage, buf_page_t *dpage)
mysql_mutex_assert_owner(&buf_pool.mutex);
ut_ad(buf_pool.page_hash.lock_get(chain).is_write_locked());
ut_ad(bpage == buf_pool.page_hash.get(id, chain));
- ut_ad(!buf_pool.watch_is_sentinel(*bpage));
ut_d(const auto state= bpage->state());
ut_ad(state >= buf_page_t::FREED);
ut_ad(state <= buf_page_t::READ_FIX);
@@ -1976,127 +1972,6 @@ static void buf_relocate(buf_page_t *bpage, buf_page_t *dpage)
buf_pool.page_hash.replace(chain, bpage, dpage);
}
-buf_page_t *buf_pool_t::watch_set(const page_id_t id,
- buf_pool_t::hash_chain &chain)
-{
- ut_ad(&chain == &page_hash.cell_get(id.fold()));
- page_hash.lock_get(chain).lock();
-
- buf_page_t *bpage= page_hash.get(id, chain);
-
- if (bpage)
- {
-got_block:
- bpage->fix();
- if (watch_is_sentinel(*bpage))
- bpage= nullptr;
- page_hash.lock_get(chain).unlock();
- return bpage;
- }
-
- page_hash.lock_get(chain).unlock();
- /* Allocate a watch[] and then try to insert it into the page_hash. */
- mysql_mutex_lock(&mutex);
-
- /* The maximum number of purge tasks should never exceed
- the UT_ARR_SIZE(watch) - 1, and there is no way for a purge task to hold a
- watch when setting another watch. */
- for (buf_page_t *w= &watch[UT_ARR_SIZE(watch)]; w-- >= watch; )
- {
- ut_ad(w->access_time == 0);
- ut_ad(!w->oldest_modification());
- ut_ad(!w->zip.data);
- ut_ad(!w->in_zip_hash);
- static_assert(buf_page_t::NOT_USED == 0, "efficiency");
- if (ut_d(auto s=) w->state())
- {
- /* This watch may be in use for some other page. */
- ut_ad(s >= buf_page_t::UNFIXED);
- continue;
- }
- /* w is pointing to watch[], which is protected by mutex.
- Normally, buf_page_t::id for objects that are reachable by
- page_hash.get(id, chain) are protected by hash_lock. */
- w->set_state(buf_page_t::UNFIXED + 1);
- w->id_= id;
-
- page_hash.lock_get(chain).lock();
- bpage= page_hash.get(id, chain);
- if (UNIV_LIKELY_NULL(bpage))
- {
- w->set_state(buf_page_t::NOT_USED);
- mysql_mutex_unlock(&mutex);
- goto got_block;
- }
-
- ut_ad(w->state() == buf_page_t::UNFIXED + 1);
- buf_pool.page_hash.append(chain, w);
- mysql_mutex_unlock(&mutex);
- page_hash.lock_get(chain).unlock();
- return nullptr;
- }
-
- ut_error;
-}
-
-/** Stop watching whether a page has been read in.
-watch_set(id) must have returned nullptr before.
-@param id page identifier
-@param chain unlocked hash table chain */
-TRANSACTIONAL_TARGET
-void buf_pool_t::watch_unset(const page_id_t id, buf_pool_t::hash_chain &chain)
-{
- mysql_mutex_assert_not_owner(&mutex);
- buf_page_t *w;
- {
- transactional_lock_guard<page_hash_latch> g{page_hash.lock_get(chain)};
- /* The page must exist because watch_set() did fix(). */
- w= page_hash.get(id, chain);
- ut_ad(w->in_page_hash);
- if (!watch_is_sentinel(*w))
- {
- no_watch:
- w->unfix();
- w= nullptr;
- }
- else
- {
- const auto state= w->state();
- ut_ad(~buf_page_t::LRU_MASK & state);
- ut_ad(state >= buf_page_t::UNFIXED + 1);
- if (state != buf_page_t::UNFIXED + 1)
- goto no_watch;
- }
- }
-
- if (!w)
- return;
-
- const auto old= w;
- /* The following is based on buf_pool_t::watch_remove(). */
- mysql_mutex_lock(&mutex);
- w= page_hash.get(id, chain);
-
- {
- transactional_lock_guard<page_hash_latch> g
- {buf_pool.page_hash.lock_get(chain)};
- auto f= w->unfix();
- ut_ad(f < buf_page_t::READ_FIX || w != old);
-
- if (f == buf_page_t::UNFIXED && w == old)
- {
- page_hash.remove(chain, w);
- // Now that w is detached from page_hash, release it to watch[].
- ut_ad(w->id_ == id);
- ut_ad(!w->frame);
- ut_ad(!w->zip.data);
- w->set_state(buf_page_t::NOT_USED);
- }
- }
-
- mysql_mutex_unlock(&mutex);
-}
-
/** Mark the page status as FREED for the given tablespace and page number.
@param[in,out] space tablespace
@param[in] page page number
@@ -2178,7 +2053,7 @@ lookup:
if (hash_lock.is_locked())
xabort();
bpage= buf_pool.page_hash.get(page_id, chain);
- if (!bpage || buf_pool.watch_is_sentinel(*bpage))
+ if (!bpage)
{
xend();
goto must_read_page;
@@ -2203,7 +2078,7 @@ lookup:
{
hash_lock.lock_shared();
bpage= buf_pool.page_hash.get(page_id, chain);
- if (!bpage || buf_pool.watch_is_sentinel(*bpage))
+ if (!bpage)
{
hash_lock.unlock_shared();
goto must_read_page;
@@ -2256,7 +2131,7 @@ lookup:
return bpage;
must_read_page:
- switch (dberr_t err= buf_read_page(page_id, zip_size)) {
+ switch (dberr_t err= buf_read_page(page_id, zip_size, chain)) {
case DB_SUCCESS:
case DB_SUCCESS_LOCKED_REC:
goto lookup;
@@ -2378,13 +2253,9 @@ err_exit:
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, or BUF_GET_IF_IN_POOL_OR_WATCH
+or BUF_PEEK_IF_IN_POOL
@param[in] mtr mini-transaction
@param[out] err DB_SUCCESS or error code
-@param[in] allow_ibuf_merge Allow change buffer merge to happen
-while reading the page from file
-then it makes sure that it does merging of change buffer changes while
-reading the page from file.
@return pointer to the block or NULL */
TRANSACTIONAL_TARGET
buf_block_t*
@@ -2395,10 +2266,8 @@ buf_page_get_low(
buf_block_t* guess,
ulint mode,
mtr_t* mtr,
- dberr_t* err,
- bool allow_ibuf_merge)
+ dberr_t* err)
{
- unsigned access_time;
ulint retries = 0;
ut_ad(!mtr || mtr->is_active());
@@ -2415,7 +2284,6 @@ buf_page_get_low(
#ifdef UNIV_DEBUG
switch (mode) {
default:
- ut_ad(!allow_ibuf_merge);
ut_ad(mode == BUF_PEEK_IF_IN_POOL);
break;
case BUF_GET_POSSIBLY_FREED:
@@ -2424,7 +2292,6 @@ buf_page_get_low(
because it does not really matter. */
break;
case BUF_GET:
- case BUF_GET_IF_IN_POOL_OR_WATCH:
ut_ad(!mtr->is_freeing_tree());
fil_space_t* s = fil_space_get(page_id.space());
ut_ad(s);
@@ -2432,9 +2299,6 @@ buf_page_get_low(
}
#endif /* UNIV_DEBUG */
- ut_ad(!mtr || !ibuf_inside(mtr)
- || ibuf_page_low(page_id, zip_size, FALSE, NULL));
-
++buf_pool.stat.n_page_gets;
auto& chain= buf_pool.page_hash.cell_get(page_id.fold());
@@ -2467,8 +2331,7 @@ loop:
hash_lock.lock_shared();
block = reinterpret_cast<buf_block_t*>(
buf_pool.page_hash.get(page_id, chain));
- if (UNIV_LIKELY(block
- && !buf_pool.watch_is_sentinel(block->page))) {
+ if (UNIV_LIKELY(block != nullptr)) {
state = block->page.fix();
hash_lock.unlock_shared();
goto got_block;
@@ -2480,17 +2343,6 @@ loop:
case BUF_GET_IF_IN_POOL:
case BUF_PEEK_IF_IN_POOL:
return nullptr;
- case BUF_GET_IF_IN_POOL_OR_WATCH:
- /* Buffer-fixing inside watch_set() will prevent eviction */
- block = reinterpret_cast<buf_block_t*>
- (buf_pool.watch_set(page_id, chain));
-
- if (block) {
- state = block->page.state();
- goto got_block_fixed;
- }
-
- return nullptr;
}
/* The call path is buf_read_page() ->
@@ -2504,10 +2356,10 @@ loop:
corrupted, or if an encrypted page with a valid
checksum cannot be decrypted. */
- switch (dberr_t local_err = buf_read_page(page_id, zip_size)) {
+ switch (dberr_t local_err = buf_read_page(page_id, zip_size, chain)) {
case DB_SUCCESS:
case DB_SUCCESS_LOCKED_REC:
- buf_read_ahead_random(page_id, zip_size, ibuf_inside(mtr));
+ buf_read_ahead_random(page_id, zip_size);
break;
default:
if (mode != BUF_GET_POSSIBLY_FREED
@@ -2529,7 +2381,6 @@ loop:
got_block:
ut_ad(!block->page.in_zip_hash);
state++;
-got_block_fixed:
ut_ad(state > buf_page_t::FREED);
if (state > buf_page_t::READ_FIX && state < buf_page_t::WRITE_FIX) {
@@ -2606,7 +2457,7 @@ wait_for_unzip:
goto loop;
}
- buf_block_t *new_block = buf_LRU_get_free_block(false);
+ buf_block_t *new_block = buf_LRU_get_free_block(have_no_mutex);
buf_block_init_low(new_block);
wait_for_unfix:
@@ -2630,7 +2481,6 @@ wait_for_unfix:
switch (state) {
case buf_page_t::UNFIXED + 1:
- case buf_page_t::IBUF_EXIST + 1:
case buf_page_t::REINIT + 1:
break;
default:
@@ -2684,13 +2534,6 @@ wait_for_unfix:
buf_pool.n_pend_unzip++;
- access_time = block->page.is_accessed();
-
- if (!access_time && !recv_no_ibuf_operations
- && ibuf_page_exists(block->page.id(), block->zip_size())) {
- state = buf_page_t::IBUF_EXIST + 1;
- }
-
/* Decompress the page while not holding
buf_pool.mutex. */
const auto ok = buf_zip_decompress(block, false);
@@ -2709,63 +2552,6 @@ wait_for_unfix:
}
}
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
-re_evict:
- if (mode != BUF_GET_IF_IN_POOL
- && mode != BUF_GET_IF_IN_POOL_OR_WATCH) {
- } else if (!ibuf_debug || recv_recovery_is_on()) {
- } else if (fil_space_t* space = fil_space_t::get(page_id.space())) {
- for (ulint i = 0; i < mtr->get_savepoint(); i++) {
- if (buf_block_t* b = mtr->block_at_savepoint(i)) {
- if (b->page.oldest_modification() > 2
- && b->page.lock.have_any()) {
- /* We are holding a dirty page latch
- that would hang buf_flush_sync(). */
- space->release();
- goto re_evict_fail;
- }
- }
- }
-
- /* Try to evict the block from the buffer pool, to use the
- insert buffer (change buffer) as much as possible. */
-
- mysql_mutex_lock(&buf_pool.mutex);
-
- block->unfix();
-
- /* Blocks cannot be relocated or enter or exit the
- buf_pool while we are holding the buf_pool.mutex. */
- const bool evicted = buf_LRU_free_page(&block->page, true);
- space->release();
-
- if (!evicted) {
- block->fix();
- }
-
- mysql_mutex_unlock(&buf_pool.mutex);
-
- if (evicted) {
- if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
- buf_pool.watch_set(page_id, chain);
- }
- return(NULL);
- }
-
- buf_flush_sync();
-
- state = block->page.state();
-
- if (state == buf_page_t::UNFIXED + 1
- && !block->page.oldest_modification()) {
- goto re_evict;
- }
-
- /* Failed to evict the page; change it directly */
- }
-re_evict_fail:
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-
if (UNIV_UNLIKELY(state < buf_page_t::UNFIXED)) {
goto ignore_block;
}
@@ -2777,112 +2563,64 @@ re_evict_fail:
#endif /* UNIV_DEBUG */
ut_ad(block->page.frame);
- if (state >= buf_page_t::UNFIXED
- && allow_ibuf_merge
- && fil_page_get_type(block->page.frame) == FIL_PAGE_INDEX
- && page_is_leaf(block->page.frame)) {
- block->page.lock.x_lock();
- ut_ad(block->page.id() == page_id
- || (state >= buf_page_t::READ_FIX
- && state < buf_page_t::WRITE_FIX));
-
-#ifdef BTR_CUR_HASH_ADAPT
- btr_search_drop_page_hash_index(block, true);
-#endif /* BTR_CUR_HASH_ADAPT */
-
- dberr_t e;
-
+ switch (rw_latch) {
+ case RW_NO_LATCH:
+ mtr->memo_push(block, MTR_MEMO_BUF_FIX);
+ return block;
+ case RW_S_LATCH:
+ block->page.lock.s_lock();
+ ut_ad(!block->page.is_read_fixed());
if (UNIV_UNLIKELY(block->page.id() != page_id)) {
+ block->page.lock.s_unlock();
+ block->page.lock.x_lock();
page_id_mismatch:
- state = block->page.state();
- e = DB_CORRUPTION;
-ibuf_merge_corrupted:
- if (err) {
- *err = e;
- }
-
if (block->page.id().is_corrupted()) {
- buf_pool.corrupted_evict(&block->page, state);
+ buf_pool.corrupted_evict(&block->page,
+ block->page.state());
}
- return nullptr;
- }
-
- state = block->page.state();
- ut_ad(state < buf_page_t::READ_FIX);
-
- if (state >= buf_page_t::IBUF_EXIST
- && state < buf_page_t::REINIT) {
- block->page.clear_ibuf_exist();
- e = ibuf_merge_or_delete_for_page(block, page_id,
- block->zip_size());
- if (UNIV_UNLIKELY(e != DB_SUCCESS)) {
- goto ibuf_merge_corrupted;
+ if (err) {
+ *err = DB_CORRUPTION;
}
+ return nullptr;
}
-
- if (rw_latch == RW_X_LATCH) {
- goto get_latch_valid;
- } else {
- block->page.lock.x_unlock();
- goto get_latch;
+ break;
+ case RW_SX_LATCH:
+ block->page.lock.u_lock();
+ ut_ad(!block->page.is_io_fixed());
+ if (UNIV_UNLIKELY(block->page.id() != page_id)) {
+ block->page.lock.u_x_upgrade();
+ goto page_id_mismatch;
}
- } else {
-get_latch:
- switch (rw_latch) {
- case RW_NO_LATCH:
- mtr->memo_push(block, MTR_MEMO_BUF_FIX);
+ break;
+ default:
+ ut_ad(rw_latch == RW_X_LATCH);
+ if (block->page.lock.x_lock_upgraded()) {
+ ut_ad(block->page.id() == page_id);
+ block->unfix();
+ mtr->page_lock_upgrade(*block);
return block;
- case RW_S_LATCH:
- block->page.lock.s_lock();
- ut_ad(!block->page.is_read_fixed());
- if (UNIV_UNLIKELY(block->page.id() != page_id)) {
- block->page.lock.s_unlock();
- block->page.lock.x_lock();
- goto page_id_mismatch;
- }
-get_latch_valid:
- mtr->memo_push(block, mtr_memo_type_t(rw_latch));
+ }
+ if (UNIV_UNLIKELY(block->page.id() != page_id)) {
+ goto page_id_mismatch;
+ }
+ }
+
+ mtr->memo_push(block, mtr_memo_type_t(rw_latch));
#ifdef BTR_CUR_HASH_ADAPT
- btr_search_drop_page_hash_index(block, true);
+ btr_search_drop_page_hash_index(block, true);
#endif /* BTR_CUR_HASH_ADAPT */
- break;
- case RW_SX_LATCH:
- block->page.lock.u_lock();
- ut_ad(!block->page.is_io_fixed());
- if (UNIV_UNLIKELY(block->page.id() != page_id)) {
- block->page.lock.u_x_upgrade();
- goto page_id_mismatch;
- }
- goto get_latch_valid;
- default:
- ut_ad(rw_latch == RW_X_LATCH);
- if (block->page.lock.x_lock_upgraded()) {
- ut_ad(block->page.id() == page_id);
- block->unfix();
- mtr->page_lock_upgrade(*block);
- return block;
- }
- if (UNIV_UNLIKELY(block->page.id() != page_id)) {
- goto page_id_mismatch;
- }
- goto get_latch_valid;
- }
- ut_ad(page_id_t(page_get_space_id(block->page.frame),
- page_get_page_no(block->page.frame))
- == page_id);
+ ut_ad(page_id_t(page_get_space_id(block->page.frame),
+ page_get_page_no(block->page.frame)) == page_id);
- if (mode == BUF_GET_POSSIBLY_FREED
- || mode == BUF_PEEK_IF_IN_POOL) {
- return block;
- }
+ if (mode == BUF_GET_POSSIBLY_FREED || mode == BUF_PEEK_IF_IN_POOL) {
+ return block;
+ }
- const bool not_first_access{block->page.set_accessed()};
- buf_page_make_young_if_needed(&block->page);
- if (!not_first_access) {
- buf_read_ahead_linear(page_id, block->zip_size(),
- ibuf_inside(mtr));
- }
+ const bool not_first_access{block->page.set_accessed()};
+ buf_page_make_young_if_needed(&block->page);
+ if (!not_first_access) {
+ buf_read_ahead_linear(page_id, block->zip_size());
}
return block;
@@ -2894,11 +2632,9 @@ get_latch_valid:
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, or BUF_GET_IF_IN_POOL_OR_WATCH
+or BUF_PEEK_IF_IN_POOL
@param[in,out] mtr mini-transaction, or NULL
@param[out] err DB_SUCCESS or error code
-@param[in] allow_ibuf_merge Allow change buffer merge while
-reading the pages from file.
@return pointer to the block or NULL */
buf_block_t*
buf_page_get_gen(
@@ -2908,13 +2644,12 @@ buf_page_get_gen(
buf_block_t* guess,
ulint mode,
mtr_t* mtr,
- dberr_t* err,
- bool allow_ibuf_merge)
+ dberr_t* err)
{
buf_block_t *block= recv_sys.recover(page_id);
if (UNIV_LIKELY(!block))
return buf_page_get_low(page_id, zip_size, rw_latch,
- guess, mode, mtr, err, allow_ibuf_merge);
+ guess, mode, mtr, err);
else if (UNIV_UNLIKELY(block == reinterpret_cast<buf_block_t*>(-1)))
{
corrupted:
@@ -2922,19 +2657,16 @@ buf_page_get_gen(
*err= DB_CORRUPTION;
return nullptr;
}
+ if (err)
+ *err= DB_SUCCESS;
/* Recovery is a special case; we fix() before acquiring lock. */
auto s= block->page.fix();
ut_ad(s >= buf_page_t::FREED);
/* The block may be write-fixed at this point because we are not
holding a lock, but it must not be read-fixed. */
ut_ad(s < buf_page_t::READ_FIX || s >= buf_page_t::WRITE_FIX);
- if (err)
- *err= DB_SUCCESS;
- const bool must_merge= allow_ibuf_merge &&
- ibuf_page_exists(page_id, block->zip_size());
if (s < buf_page_t::UNFIXED)
{
- got_freed_page:
ut_ad(mode == BUF_GET_POSSIBLY_FREED || mode == BUF_PEEK_IF_IN_POOL);
mysql_mutex_lock(&buf_pool.mutex);
block->page.unfix();
@@ -2942,40 +2674,7 @@ buf_page_get_gen(
mysql_mutex_unlock(&buf_pool.mutex);
goto corrupted;
}
- else if (must_merge &&
- fil_page_get_type(block->page.frame) == FIL_PAGE_INDEX &&
- page_is_leaf(block->page.frame))
- {
- block->page.lock.x_lock();
- s= block->page.state();
- ut_ad(s > buf_page_t::FREED);
- ut_ad(s < buf_page_t::READ_FIX);
- if (s < buf_page_t::UNFIXED)
- {
- block->page.lock.x_unlock();
- goto got_freed_page;
- }
- else
- {
- if (block->page.is_ibuf_exist())
- block->page.clear_ibuf_exist();
- if (dberr_t e=
- ibuf_merge_or_delete_for_page(block, page_id, block->zip_size()))
- {
- if (err)
- *err= e;
- buf_pool.corrupted_evict(&block->page, s);
- return nullptr;
- }
- }
- if (rw_latch == RW_X_LATCH)
- {
- mtr->memo_push(block, MTR_MEMO_PAGE_X_FIX);
- return block;
- }
- block->page.lock.x_unlock();
- }
mtr->page_lock(block, rw_latch);
return block;
}
@@ -3042,7 +2741,6 @@ bool buf_page_optimistic_get(ulint rw_latch, buf_block_t *block,
{
ut_ad(rw_latch == RW_S_LATCH || !block->page.is_io_fixed());
ut_ad(id == block->page.id());
- ut_ad(!ibuf_inside(mtr) || ibuf_page(id, block->zip_size(), nullptr));
if (modify_clock != block->modify_clock || block->page.is_freed())
{
@@ -3137,12 +2835,11 @@ retry:
buf_page_t *bpage= buf_pool.page_hash.get(page_id, chain);
- if (bpage && !buf_pool.watch_is_sentinel(*bpage))
+ if (bpage)
{
#ifdef BTR_CUR_HASH_ADAPT
const dict_index_t *drop_hash_entry= nullptr;
#endif
- bool ibuf_exist= false;
if (!mtr->have_x_latch(reinterpret_cast<const buf_block_t&>(*bpage)))
{
@@ -3168,10 +2865,7 @@ retry:
if (state < buf_page_t::UNFIXED)
bpage->set_reinit(buf_page_t::FREED);
else
- {
bpage->set_reinit(state & buf_page_t::LRU_MASK);
- ibuf_exist= (state & buf_page_t::LRU_MASK) == buf_page_t::IBUF_EXIST;
- }
if (UNIV_LIKELY(bpage->frame != nullptr))
{
@@ -3197,10 +2891,7 @@ retry:
if (state < buf_page_t::UNFIXED)
bpage->set_reinit(buf_page_t::FREED);
else
- {
bpage->set_reinit(state & buf_page_t::LRU_MASK);
- ibuf_exist= (state & buf_page_t::LRU_MASK) == buf_page_t::IBUF_EXIST;
- }
mysql_mutex_lock(&buf_pool.flush_list_mutex);
buf_relocate(bpage, &free_block->page);
@@ -3240,9 +2931,6 @@ retry:
false);
#endif /* BTR_CUR_HASH_ADAPT */
- if (ibuf_exist && !recv_recovery_is_on())
- ibuf_merge_or_delete_for_page(nullptr, page_id, zip_size);
-
return reinterpret_cast<buf_block_t*>(bpage);
}
@@ -3283,13 +2971,6 @@ retry:
bpage->set_accessed();
- /* Delete possible entries for the page from the insert buffer:
- such can exist if the page belonged to an index which was dropped */
- if (page_id < page_id_t{SRV_SPACE_ID_UPPER_BOUND, 0} &&
- !srv_is_undo_tablespace(page_id.space()) &&
- !recv_recovery_is_on())
- ibuf_merge_or_delete_for_page(nullptr, page_id, zip_size);
-
static_assert(FIL_PAGE_PREV + 4 == FIL_PAGE_NEXT, "adjacent");
memset_aligned<8>(bpage->frame + FIL_PAGE_PREV, 0xff, 8);
mach_write_to_2(bpage->frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED);
@@ -3353,32 +3034,15 @@ ATTRIBUTE_COLD void buf_page_monitor(const buf_page_t &bpage, bool read)
const byte* frame = bpage.zip.data ? bpage.zip.data : bpage.frame;
switch (fil_page_get_type(frame)) {
- ulint level;
case FIL_PAGE_TYPE_INSTANT:
case FIL_PAGE_INDEX:
case FIL_PAGE_RTREE:
- level = btr_page_get_level(frame);
-
- /* Check if it is an index page for insert buffer */
- if (fil_page_get_type(frame) == FIL_PAGE_INDEX
- && btr_page_get_index_id(frame)
- == (index_id_t)(DICT_IBUF_ID_MIN + IBUF_SPACE_ID)) {
- if (level == 0) {
- counter = MONITOR_RW_COUNTER(
- read, MONITOR_INDEX_IBUF_LEAF_PAGE);
- } else {
- counter = MONITOR_RW_COUNTER(
- read,
- MONITOR_INDEX_IBUF_NON_LEAF_PAGE);
- }
+ if (page_is_leaf(frame)) {
+ counter = MONITOR_RW_COUNTER(
+ read, MONITOR_INDEX_LEAF_PAGE);
} else {
- if (level == 0) {
- counter = MONITOR_RW_COUNTER(
- read, MONITOR_INDEX_LEAF_PAGE);
- } else {
- counter = MONITOR_RW_COUNTER(
- read, MONITOR_INDEX_NON_LEAF_PAGE);
- }
+ counter = MONITOR_RW_COUNTER(
+ read, MONITOR_INDEX_NON_LEAF_PAGE);
}
break;
@@ -3390,14 +3054,6 @@ ATTRIBUTE_COLD void buf_page_monitor(const buf_page_t &bpage, bool read)
counter = MONITOR_RW_COUNTER(read, MONITOR_INODE_PAGE);
break;
- case FIL_PAGE_IBUF_FREE_LIST:
- counter = MONITOR_RW_COUNTER(read, MONITOR_IBUF_FREELIST_PAGE);
- break;
-
- case FIL_PAGE_IBUF_BITMAP:
- counter = MONITOR_RW_COUNTER(read, MONITOR_IBUF_BITMAP_PAGE);
- break;
-
case FIL_PAGE_TYPE_SYS:
counter = MONITOR_RW_COUNTER(read, MONITOR_SYSTEM_PAGE);
break;
@@ -3613,41 +3269,30 @@ database_corrupted:
<< FORCE_RECOVERY_MSG;
}
- if (!srv_force_recovery)
- goto release_page;
- }
-
- if (err == DB_PAGE_CORRUPTED || err == DB_DECRYPTION_FAILED)
- {
+ if (err == DB_PAGE_CORRUPTED || err == DB_DECRYPTION_FAILED ||
+ !srv_force_recovery)
+ {
release_page:
- buf_pool.corrupted_evict(this, buf_page_t::READ_FIX);
- return err;
+ buf_pool.corrupted_evict(this, buf_page_t::READ_FIX);
+ return err;
+ }
}
- const bool recovery= recv_recovery_is_on();
+ const bool recovery= frame && recv_recovery_is_on();
if (recovery && !recv_recover_page(node.space, this))
return DB_PAGE_CORRUPTED;
- const bool ibuf_may_exist= frame && !recv_no_ibuf_operations &&
- (!expected_id.space() || !is_predefined_tablespace(expected_id.space())) &&
- fil_page_get_type(read_frame) == FIL_PAGE_INDEX &&
- page_is_leaf(read_frame);
-
if (UNIV_UNLIKELY(MONITOR_IS_ON(MONITOR_MODULE_BUF_PAGE)))
buf_page_monitor(*this, true);
DBUG_PRINT("ib_buf", ("read page %u:%u", id().space(), id().page_no()));
if (!recovery)
{
- ut_d(auto f=) zip.fix.fetch_sub(ibuf_may_exist
- ? READ_FIX - IBUF_EXIST
- : READ_FIX - UNFIXED);
+ ut_d(auto f=) zip.fix.fetch_sub(READ_FIX - UNFIXED);
ut_ad(f >= READ_FIX);
ut_ad(f < WRITE_FIX);
}
- else if (ibuf_may_exist)
- set_ibuf_exist();
lock.x_unlock(true);
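
The largest deletion in buf0buf.cc above is buf_pool_t::watch_set()/watch_unset(): BUF_GET_IF_IN_POOL_OR_WATCH existed so that the change-buffer code could detect whether a page it was about to buffer a change for had been read into the pool concurrently, and with the change buffer gone the whole sentinel machinery goes with it. As a rough standalone recap of the sentinel-watch idea being removed (the map, the mutex and the single shared sentinel are simplifications invented for this sketch; the deleted code used a fixed watch[] array protected by buf_pool.mutex):

#include <cstdint>
#include <mutex>
#include <unordered_map>

// Sentinel watch over a page hash, simplified: a watcher plants a sentinel
// under a page id; a later page read replaces it, so the watcher can tell
// whether the page arrived while it was watching.
class page_hash_sketch {
  struct entry { bool sentinel; };
  std::mutex latch;
  std::unordered_map<uint64_t, entry> hash;

public:
  // Start watching; returns false if a real page is already present.
  bool watch_set(uint64_t id) {
    std::lock_guard<std::mutex> g(latch);
    auto ins = hash.try_emplace(id, entry{true});
    return ins.second || ins.first->second.sentinel;
  }

  // A completed page read turns the entry into (or inserts) a real page.
  void page_arrived(uint64_t id) {
    std::lock_guard<std::mutex> g(latch);
    hash[id].sentinel = false;
  }

  // True if the watched page was read in since watch_set().
  bool watch_occurred(uint64_t id) {
    std::lock_guard<std::mutex> g(latch);
    auto it = hash.find(id);
    return it != hash.end() && !it->second.sentinel;
  }

  // Stop watching; keep the entry only if it now describes a real page.
  void watch_unset(uint64_t id) {
    std::lock_guard<std::mutex> g(latch);
    auto it = hash.find(id);
    if (it != hash.end() && it->second.sentinel)
      hash.erase(it);
  }
};
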
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 5ec9c5ca2e1..569096377c0 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
Copyright (c) 2013, 2014, Fusion-io
This program is free software; you can redistribute it and/or modify it under
@@ -873,7 +873,7 @@ static bool buf_flush_check_neighbor(const page_id_t id, ulint fold,
const buf_page_t *bpage=
buf_pool.page_hash.get(id, buf_pool.page_hash.cell_get(fold));
- if (!bpage || buf_pool.watch_is_sentinel(*bpage))
+ if (!bpage)
return false;
/* We avoid flushing 'non-old' blocks in an eviction flush, because the
@@ -1059,7 +1059,6 @@ static ulint buf_flush_try_neighbors(fil_space_t *space,
{
ut_ad(bpage == b);
bpage= nullptr;
- ut_ad(!buf_pool.watch_is_sentinel(*b));
ut_ad(b->oldest_modification() > 1);
flush:
if (b->flush(evict, space))
@@ -1070,7 +1069,7 @@ static ulint buf_flush_try_neighbors(fil_space_t *space,
}
/* We avoid flushing 'non-old' blocks in an eviction flush,
because the flushed blocks are soon freed */
- else if ((!evict || b->is_old()) && !buf_pool.watch_is_sentinel(*b) &&
+ else if ((!evict || b->is_old()) &&
b->oldest_modification() > 1 && b->lock.u_lock_try(true))
{
if (b->oldest_modification() < 2)
@@ -1169,7 +1168,7 @@ static void buf_flush_discard_page(buf_page_t *bpage)
ut_d(const auto state= bpage->state());
ut_ad(state == buf_page_t::FREED || state == buf_page_t::UNFIXED ||
- state == buf_page_t::IBUF_EXIST || state == buf_page_t::REINIT);
+ state == buf_page_t::REINIT);
bpage->lock.u_unlock(true);
buf_LRU_free_page(bpage, true);
}
@@ -1744,7 +1743,7 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept
resize_log.write(CHECKPOINT_1, {c, get_block_size()});
}
- if (srv_file_flush_method != SRV_O_DSYNC)
+ if (!log_write_through)
ut_a(log.flush());
latch.wr_lock(SRW_LOCK_CALL);
ut_ad(checkpoint_pending);
@@ -1776,7 +1775,7 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept
if (!is_pmem())
{
- if (srv_file_flush_method != SRV_O_DSYNC)
+ if (!log_write_through)
ut_a(resize_log.flush());
IF_WIN(log.close(),);
}
@@ -1922,13 +1921,7 @@ static bool log_checkpoint()
if (recv_recovery_is_on())
recv_sys.apply(true);
- switch (srv_file_flush_method) {
- case SRV_NOSYNC:
- case SRV_O_DIRECT_NO_FSYNC:
- break;
- default:
- fil_flush_file_spaces();
- }
+ fil_flush_file_spaces();
log_sys.latch.wr_lock(SRW_LOCK_CALL);
const lsn_t end_lsn= log_sys.get_lsn();
@@ -2073,13 +2066,7 @@ ATTRIBUTE_COLD static void buf_flush_sync_for_checkpoint(lsn_t lsn)
MONITOR_FLUSH_SYNC_PAGES, n_flushed);
}
- switch (srv_file_flush_method) {
- case SRV_NOSYNC:
- case SRV_O_DIRECT_NO_FSYNC:
- break;
- default:
- fil_flush_file_spaces();
- }
+ fil_flush_file_spaces();
log_sys.latch.wr_lock(SRW_LOCK_CALL);
const lsn_t newest_lsn= log_sys.get_lsn();
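
The two checkpoint hunks above drop the switch on srv_file_flush_method in favour of an unconditional fil_flush_file_spaces() call, and make the redo-log flush depend on log_write_through instead: if the log handle was opened for write-through I/O, every write is already durable and an extra flush is redundant. A compressed sketch of that rule only; the types and the flush callback are stand-ins, not the server's API:

// Durability rule after this change: data files are flushed at a checkpoint
// without consulting the configured flush method; the redo log is flushed
// only when its handle is not already performing write-through writes.
struct redo_log_sketch { bool write_through; void flush() {} };

void checkpoint_flush_sketch(redo_log_sketch &log, void (*flush_data_files)()) {
  flush_data_files();         // unconditional, was gated on srv_file_flush_method
  if (!log.write_through)
    log.flush();              // redundant when each log write is already durable
}
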
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 1a0e481ece4..e4e20e8335f 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -385,14 +385,15 @@ we put it to free list to be used.
* scan whole LRU list
* scan LRU list even if buf_pool.try_LRU_scan is not set
-@param have_mutex whether buf_pool.mutex is already being held
-@return the free control block, in state BUF_BLOCK_MEMORY */
-buf_block_t *buf_LRU_get_free_block(bool have_mutex)
+@param get how to allocate the block
+@return the free control block, in state BUF_BLOCK_MEMORY
+@retval nullptr if get==have_no_mutex_soft and memory was not available */
+buf_block_t* buf_LRU_get_free_block(buf_LRU_get get)
{
ulint n_iterations = 0;
ulint flush_failures = 0;
MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH);
- if (have_mutex) {
+ if (UNIV_UNLIKELY(get == have_mutex)) {
mysql_mutex_assert_owner(&buf_pool.mutex);
goto got_mutex;
}
@@ -411,13 +412,14 @@ got_mutex:
DBUG_EXECUTE_IF("ib_lru_force_no_free_page",
if (!buf_lru_free_blocks_error_printed) {
n_iterations = 21;
+ block = nullptr;
goto not_found;});
retry:
/* If there is a block in the free list, take it */
if ((block = buf_LRU_get_free_only()) != nullptr) {
got_block:
- if (!have_mutex) {
+ if (UNIV_LIKELY(get != have_mutex)) {
mysql_mutex_unlock(&buf_pool.mutex);
}
block->page.zip.clear();
@@ -441,6 +443,11 @@ got_block:
buf_pool.try_LRU_scan = false;
}
+ if (get == have_no_mutex_soft) {
+ mysql_mutex_unlock(&buf_pool.mutex);
+ return nullptr;
+ }
+
for (;;) {
if ((block = buf_LRU_get_free_only()) != nullptr) {
goto got_block;
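
buf_LRU_get_free_block() now takes a three-valued enum instead of a bool, so call sites read buf_LRU_get_free_block(have_mutex) rather than an opaque true, and the new soft mode can return nullptr instead of waiting for memory. A minimal standalone sketch of the same calling convention; the enum values mirror the diff, but the pool and allocator body below are invented for illustration:

#include <mutex>

enum buf_LRU_get {
  have_mutex,          // caller already holds buf_pool.mutex
  have_no_mutex,       // lock internally and wait until a block is available
  have_no_mutex_soft   // lock internally, but give up instead of waiting
};

struct block_t {};

struct pool_t {
  std::mutex mutex;
  block_t *try_get_free() { static block_t b; return &b; }  // placeholder free-list pop
  void wait_for_free() {}                                    // placeholder flush/evict
} pool;

block_t *get_free_block_sketch(buf_LRU_get get) {
  std::unique_lock<std::mutex> lk(pool.mutex, std::defer_lock);
  if (get != have_mutex)
    lk.lock();                        // the caller did not lock for us
  block_t *b;
  while (!(b = pool.try_get_free())) {
    if (get == have_no_mutex_soft)
      return nullptr;                 // soft mode gives up rather than waiting
    pool.wait_for_free();
  }
  return b;                           // lk unlocks here unless get == have_mutex
}
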
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index fa91939acee..bbd905365ed 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2022, MariaDB Corporation.
+Copyright (c) 2015, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -35,7 +35,7 @@ Created 11/5/1995 Heikki Tuuri
#include "buf0lru.h"
#include "buf0buddy.h"
#include "buf0dblwr.h"
-#include "ibuf0ibuf.h"
+#include "page0zip.h"
#include "log0recv.h"
#include "trx0sys.h"
#include "os0file.h"
@@ -43,122 +43,89 @@ Created 11/5/1995 Heikki Tuuri
#include "srv0srv.h"
#include "log.h"
+TRANSACTIONAL_TARGET
+bool buf_pool_t::page_hash_contains(const page_id_t page_id, hash_chain &chain)
+{
+ transactional_shared_lock_guard<page_hash_latch> g
+ {page_hash.lock_get(chain)};
+ return page_hash.get(page_id, chain);
+}
+
/** If there are buf_pool.curr_size per the number below pending reads, then
read-ahead is not done: this is to prevent flooding the buffer pool with
i/o-fixed buffer blocks */
#define BUF_READ_AHEAD_PEND_LIMIT 2
-/** Remove the sentinel block for the watch before replacing it with a
-real block. watch_unset() or watch_occurred() will notice
-that the block has been replaced with the real block.
-@param w sentinel
-@param chain locked hash table chain
-@return w->state() */
-inline uint32_t buf_pool_t::watch_remove(buf_page_t *w,
- buf_pool_t::hash_chain &chain)
-{
- mysql_mutex_assert_owner(&buf_pool.mutex);
- ut_ad(xtest() || page_hash.lock_get(chain).is_write_locked());
- ut_ad(w >= &watch[0]);
- ut_ad(w < &watch[array_elements(watch)]);
- ut_ad(!w->in_zip_hash);
- ut_ad(!w->zip.data);
-
- uint32_t s{w->state()};
- w->set_state(buf_page_t::NOT_USED);
- ut_ad(s >= buf_page_t::UNFIXED);
- ut_ad(s < buf_page_t::READ_FIX);
-
- if (~buf_page_t::LRU_MASK & s)
- page_hash.remove(chain, w);
-
- ut_ad(!w->in_page_hash);
- w->id_= page_id_t(~0ULL);
- return s;
-}
-
/** Initialize a page for read to the buffer buf_pool. If the page is
(1) already in buf_pool, or
-(2) if we specify to read only ibuf pages and the page is not an ibuf page, or
-(3) if the space is deleted or being deleted,
+(2) if the tablespace has been or is being deleted,
then this function does nothing.
Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock
on the buffer frame. The io-handler must take care that the flag is cleared
and the lock released later.
-@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ...
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] unzip whether the uncompressed page is
- requested (for ROW_FORMAT=COMPRESSED)
+@param page_id page identifier
+@param zip_size ROW_FORMAT=COMPRESSED page size, or 0,
+ bitwise-ORed with 1 in recovery
+@param chain buf_pool.page_hash cell for page_id
+@param block preallocated buffer block (set to nullptr if consumed)
@return pointer to the block
-@retval NULL in case of an error */
+@retval nullptr in case of an error */
TRANSACTIONAL_TARGET
-static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id,
- ulint zip_size, bool unzip)
+static buf_page_t *buf_page_init_for_read(const page_id_t page_id,
+ ulint zip_size,
+ buf_pool_t::hash_chain &chain,
+ buf_block_t *&block)
{
- mtr_t mtr;
-
- if (mode == BUF_READ_IBUF_PAGES_ONLY)
- {
- /* It is a read-ahead within an ibuf routine */
- ut_ad(!ibuf_bitmap_page(page_id, zip_size));
- ibuf_mtr_start(&mtr);
-
- if (!recv_no_ibuf_operations && !ibuf_page(page_id, zip_size, &mtr))
- {
- ibuf_mtr_commit(&mtr);
- return nullptr;
- }
- }
- else
- ut_ad(mode == BUF_READ_ANY_PAGE);
-
buf_page_t *bpage= nullptr;
- buf_block_t *block= nullptr;
- if (!zip_size || unzip || recv_recovery_is_on())
+ if (!zip_size || (zip_size & 1))
{
- block= buf_LRU_get_free_block(false);
- block->initialise(page_id, zip_size, buf_page_t::READ_FIX);
+ bpage= &block->page;
+ block->initialise(page_id, zip_size & ~1, buf_page_t::READ_FIX);
/* x_unlock() will be invoked
in buf_page_t::read_complete() by the io-handler thread. */
block->page.lock.x_lock(true);
}
- buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
-
- mysql_mutex_lock(&buf_pool.mutex);
-
- buf_page_t *hash_page= buf_pool.page_hash.get(page_id, chain);
- if (hash_page && !buf_pool.watch_is_sentinel(*hash_page))
+ page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain);
+ hash_lock.lock();
+ if (buf_pool.page_hash.get(page_id, chain))
{
+page_exists:
+ hash_lock.unlock();
/* The page is already in the buffer pool. */
- if (block)
+ if (bpage)
{
- block->page.lock.x_unlock(true);
- ut_d(block->page.set_state(buf_page_t::MEMORY));
- buf_LRU_block_free_non_file_page(block);
+ bpage->lock.x_unlock(true);
+ ut_d(mysql_mutex_lock(&buf_pool.mutex));
+ ut_d(bpage->set_state(buf_page_t::MEMORY));
+ ut_d(mysql_mutex_unlock(&buf_pool.mutex));
}
- goto func_exit;
+ return nullptr;
}
- if (UNIV_LIKELY(block != nullptr))
+ if (UNIV_UNLIKELY(mysql_mutex_trylock(&buf_pool.mutex)))
{
- bpage= &block->page;
-
- /* Insert into the hash table of file pages */
+ hash_lock.unlock();
+ mysql_mutex_lock(&buf_pool.mutex);
+ hash_lock.lock();
+ if (buf_pool.page_hash.get(page_id, chain))
{
- transactional_lock_guard<page_hash_latch> g
- {buf_pool.page_hash.lock_get(chain)};
+ mysql_mutex_unlock(&buf_pool.mutex);
+ goto page_exists;
+ }
+ }
- if (hash_page)
- bpage->set_state(buf_pool.watch_remove(hash_page, chain) +
- (buf_page_t::READ_FIX - buf_page_t::UNFIXED));
+ zip_size&= ~1;
- buf_pool.page_hash.append(chain, &block->page);
- }
+ if (UNIV_LIKELY(bpage != nullptr))
+ {
+ block= nullptr;
+ /* Insert into the hash table of file pages */
+ buf_pool.page_hash.append(chain, bpage);
+ hash_lock.unlock();
/* The block must be put to the LRU list, to the old blocks */
- buf_LRU_add_block(&block->page, true/* to old blocks */);
+ buf_LRU_add_block(bpage, true/* to old blocks */);
if (UNIV_UNLIKELY(zip_size))
{
@@ -166,19 +133,19 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id,
buf_buddy_alloc(). We must defer this operation until after the
block descriptor has been added to buf_pool.LRU and
buf_pool.page_hash. */
- block->page.zip.data= static_cast<page_zip_t*>
- (buf_buddy_alloc(zip_size));
+ bpage->zip.data= static_cast<page_zip_t*>(buf_buddy_alloc(zip_size));
/* To maintain the invariant
block->in_unzip_LRU_list == block->page.belongs_to_unzip_LRU()
we have to add this block to unzip_LRU
after block->page.zip.data is set. */
- ut_ad(block->page.belongs_to_unzip_LRU());
- buf_unzip_LRU_add_block(block, TRUE);
+ ut_ad(bpage->belongs_to_unzip_LRU());
+ buf_unzip_LRU_add_block(reinterpret_cast<buf_block_t*>(bpage), TRUE);
}
}
else
{
+ hash_lock.unlock();
/* The compressed page must be allocated before the
control block (bpage), in order to avoid the
invocation of buf_buddy_relocate_block() on
@@ -191,9 +158,7 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id,
check the page_hash again, as it may have been modified. */
if (UNIV_UNLIKELY(lru))
{
- hash_page= buf_pool.page_hash.get(page_id, chain);
-
- if (UNIV_UNLIKELY(hash_page && !buf_pool.watch_is_sentinel(*hash_page)))
+ if (UNIV_LIKELY_NULL(buf_pool.page_hash.get(page_id, chain)))
{
/* The block was added by some other thread. */
buf_buddy_free(data, zip_size);
@@ -213,11 +178,6 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id,
{
transactional_lock_guard<page_hash_latch> g
{buf_pool.page_hash.lock_get(chain)};
-
- if (hash_page)
- bpage->set_state(buf_pool.watch_remove(hash_page, chain) +
- (buf_page_t::READ_FIX - buf_page_t::UNFIXED));
-
buf_pool.page_hash.append(chain, bpage);
}
@@ -229,13 +189,9 @@ static buf_page_t* buf_page_init_for_read(ulint mode, const page_id_t page_id,
buf_pool.stat.n_pages_read++;
mysql_mutex_unlock(&buf_pool.mutex);
buf_pool.n_pend_reads++;
- goto func_exit_no_mutex;
+ return bpage;
func_exit:
mysql_mutex_unlock(&buf_pool.mutex);
-func_exit_no_mutex:
- if (mode == BUF_READ_IBUF_PAGES_ONLY)
- ibuf_mtr_commit(&mtr);
-
ut_ad(!bpage || bpage->in_file());
return bpage;
@@ -246,54 +202,31 @@ buffer buf_pool if it is not already there, in which case does nothing.
Sets the io_fix flag and sets an exclusive lock on the buffer frame. The
flag is cleared and the x-lock released by an i/o-handler thread.
+@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0,
+ bitwise-ORed with 1 in recovery
+@param[in,out] chain buf_pool.page_hash cell for page_id
@param[in,out] space tablespace
+@param[in,out] block preallocated buffer block
@param[in] sync true if synchronous aio is desired
-@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ...,
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] unzip true=request uncompressed page
@return error code
@retval DB_SUCCESS if the page was read
@retval DB_SUCCESS_LOCKED_REC if the page exists in the buffer pool already */
static
dberr_t
buf_read_page_low(
- fil_space_t* space,
- bool sync,
- ulint mode,
const page_id_t page_id,
ulint zip_size,
- bool unzip)
+ buf_pool_t::hash_chain& chain,
+ fil_space_t* space,
+ buf_block_t*& block,
+ bool sync = false)
{
buf_page_t* bpage;
- if (buf_dblwr.is_inside(page_id)) {
- ib::error() << "Trying to read doublewrite buffer page "
- << page_id;
- ut_ad(0);
- space->release();
- return DB_PAGE_CORRUPTED;
- }
-
- if (sync) {
- } else if (trx_sys_hdr_page(page_id)
- || ibuf_bitmap_page(page_id, zip_size)
- || (!recv_no_ibuf_operations
- && ibuf_page(page_id, zip_size, nullptr))) {
-
- /* Trx sys header is so low in the latching order that we play
- safe and do not leave the i/o-completion to an asynchronous
- i/o-thread. Change buffer pages must always be read with
- synchronous i/o, to make sure they do not get involved in
- thread deadlocks. */
- sync = true;
- }
+ ut_ad(!buf_dblwr.is_inside(page_id));
- /* The following call will also check if the tablespace does not exist
- or is being dropped; if we succeed in initing the page in the buffer
- pool for read, then DISCARD cannot proceed until the read has
- completed */
- bpage = buf_page_init_for_read(mode, page_id, zip_size, unzip);
+ bpage = buf_page_init_for_read(page_id, zip_size, chain, block);
if (!bpage) {
space->release();
@@ -308,10 +241,10 @@ buf_read_page_low(
DBUG_LOG("ib_buf",
"read page " << page_id << " zip_size=" << zip_size
- << " unzip=" << unzip << ',' << (sync ? "sync" : "async"));
+ << (sync ? " sync" : " async"));
- void* dst = zip_size ? bpage->zip.data : bpage->frame;
- const ulint len = zip_size ? zip_size : srv_page_size;
+ void* dst = zip_size > 1 ? bpage->zip.data : bpage->frame;
+ const ulint len = zip_size & ~1 ? zip_size & ~1 : srv_page_size;
auto fio = space->io(IORequest(sync
? IORequest::READ_SYNC
@@ -335,25 +268,35 @@ buf_read_page_low(
return fio.err;
}
+/** Acquire a buffer block. */
+static buf_block_t *buf_read_acquire()
+{
+ return buf_LRU_get_free_block(have_no_mutex_soft);
+}
+
+/** Free a buffer block if needed. */
+static void buf_read_release(buf_block_t *block)
+{
+ if (block)
+ {
+ mysql_mutex_lock(&buf_pool.mutex);
+ buf_LRU_block_free_non_file_page(block);
+ mysql_mutex_unlock(&buf_pool.mutex);
+ }
+}
+
/** Applies a random read-ahead in buf_pool if there are at least a threshold
value of accessed pages from the random read-ahead area. Does not read any
page, not even the one at the position (space, offset), if the read-ahead
-mechanism is not activated. NOTE 1: the calling thread may own latches on
+mechanism is not activated. NOTE: the calling thread may own latches on
pages: to avoid deadlocks this function must be written such that it cannot
-end up waiting for these latches! NOTE 2: the calling thread must want
-access to the page given: this rule is set to prevent unintended read-aheads
-performed by ibuf routines, a situation which could result in a deadlock if
-the OS does not support asynchronous i/o.
+end up waiting for these latches!
@param[in] page_id page id of a page which the current thread
wants to access
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] ibuf whether we are inside ibuf routine
-@return number of page read requests issued; NOTE that if we read ibuf
-pages, it may happen that the page at the given page number does not
-get read even if we return a positive value! */
+@return number of page read requests issued */
TRANSACTIONAL_TARGET
-ulint
-buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
+ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size)
{
if (!srv_random_read_ahead)
return 0;
@@ -362,11 +305,6 @@ buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
/* No read-ahead to avoid thread deadlocks */
return 0;
- if (ibuf_bitmap_page(page_id, zip_size) || trx_sys_hdr_page(page_id))
- /* If it is an ibuf bitmap page or trx sys hdr, we do no
- read-ahead, as that could break the ibuf page access order */
- return 0;
-
if (buf_pool.n_pend_reads > buf_pool.curr_size / BUF_READ_AHEAD_PEND_LIMIT)
return 0;
@@ -402,18 +340,23 @@ read_ahead:
goto no_read_ahead;
/* Read all the suitable blocks within the area */
- const ulint ibuf_mode= ibuf ? BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE;
+ buf_block_t *block= nullptr;
+ if (!zip_size && !(block= buf_read_acquire()))
+ goto no_read_ahead;
for (page_id_t i= low; i < high; ++i)
{
- if (ibuf_bitmap_page(i, zip_size))
- continue;
if (space->is_stopping())
break;
+ buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(i.fold());
space->reacquire();
- if (buf_read_page_low(space, false, ibuf_mode, i, zip_size, false) ==
- DB_SUCCESS)
+ if (buf_read_page_low(i, zip_size, chain, space, block) == DB_SUCCESS)
+ {
count++;
+ ut_ad(!block);
+ if (!zip_size && !(block= buf_read_acquire()))
+ break;
+ }
}
if (count)
@@ -430,6 +373,7 @@ read_ahead:
}
space->release();
+ buf_read_release(block);
return count;
}
@@ -437,15 +381,17 @@ read_ahead:
if it is not already there. Sets the io_fix and an exclusive lock
on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread.
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@retval DB_SUCCESS if the page was read and is not corrupted
+@param page_id page id
+@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param chain buf_pool.page_hash cell for page_id
+@retval DB_SUCCESS if the page was read and is not corrupted,
@retval DB_SUCCESS_LOCKED_REC if the page was not read
-@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted
+@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
@retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
after decryption normal page checksum does not match.
@retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
-dberr_t buf_read_page(const page_id_t page_id, ulint zip_size)
+dberr_t buf_read_page(const page_id_t page_id, ulint zip_size,
+ buf_pool_t::hash_chain &chain)
{
fil_space_t *space= fil_space_t::get(page_id.space());
if (!space)
@@ -455,9 +401,20 @@ dberr_t buf_read_page(const page_id_t page_id, ulint zip_size)
return DB_TABLESPACE_DELETED;
}
- buf_LRU_stat_inc_io(); /* NOT protected by buf_pool.mutex */
- return buf_read_page_low(space, true, BUF_READ_ANY_PAGE,
- page_id, zip_size, false);
+ /* Our caller should already have ensured that the page does not
+ exist in buf_pool.page_hash. */
+ buf_block_t *block= nullptr;
+ if (UNIV_LIKELY(!zip_size))
+ {
+ mysql_mutex_lock(&buf_pool.mutex);
+ buf_LRU_stat_inc_io();
+ block= buf_LRU_get_free_block(have_mutex);
+ mysql_mutex_unlock(&buf_pool.mutex);
+ }
+
+ dberr_t err= buf_read_page_low(page_id, zip_size, chain, space, block, true);
+ buf_read_release(block);
+ return err;
}
/** High-level function which reads a page asynchronously from a file to the
@@ -470,15 +427,30 @@ released by the i/o-handler thread.
void buf_read_page_background(fil_space_t *space, const page_id_t page_id,
ulint zip_size)
{
- buf_read_page_low(space, false, BUF_READ_ANY_PAGE,
- page_id, zip_size, false);
-
- /* We do not increment number of I/O operations used for LRU policy
- here (buf_LRU_stat_inc_io()). We use this in heuristics to decide
- about evicting uncompressed version of compressed pages from the
- buffer pool. Since this function is called from buffer pool load
- these IOs are deliberate and are not part of normal workload we can
- ignore these in our heuristics. */
+ buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
+ if (buf_pool.page_hash_contains(page_id, chain))
+ {
+ skip:
+ space->release();
+ return;
+ }
+
+ buf_block_t *block= nullptr;
+ if (!zip_size && !(block= buf_read_acquire()))
+ goto skip;
+
+ if (buf_read_page_low(page_id, zip_size, chain, space, block) ==
+ DB_SUCCESS)
+ ut_ad(!block);
+ else
+ buf_read_release(block);
+
+ /* We do not increment number of I/O operations used for LRU policy
+ here (buf_LRU_stat_inc_io()). We use this in heuristics to decide
+ about evicting uncompressed version of ROW_FORMAT=COMPRESSED pages
+ from the buffer pool. Since this function is called from buffer pool
+ load these IOs are deliberate and are not part of normal workload we
+ can ignore these in our heuristics. */
}
/** Applies linear read-ahead if in the buf_pool the page is a border page of
@@ -500,16 +472,11 @@ only very improbably.
NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this
function must be written such that it cannot end up waiting for these
latches!
-NOTE 3: the calling thread must want access to the page given: this rule is
-set to prevent unintended read-aheads performed by ibuf routines, a situation
-which could result in a deadlock if the OS does not support asynchronous io.
@param[in] page_id page id; see NOTE 3 above
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] ibuf whether if we are inside ibuf routine
@return number of page read requests issued */
TRANSACTIONAL_TARGET
-ulint
-buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
+ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size)
{
/* check if readahead is disabled */
if (!srv_read_ahead_threshold)
@@ -534,11 +501,6 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
/* This is not a border page of the area */
return 0;
- if (ibuf_bitmap_page(page_id, zip_size) || trx_sys_hdr_page(page_id))
- /* If it is an ibuf bitmap page or trx sys hdr, we do no
- read-ahead, as that could break the ibuf page access order */
- return 0;
-
fil_space_t *space= fil_space_t::get(page_id.space());
if (!space)
return 0;
@@ -621,18 +583,25 @@ failed:
}
/* If we got this far, read-ahead can be sensible: do it */
+ buf_block_t *block= nullptr;
+ if (!zip_size && !(block= buf_read_acquire()))
+ goto fail;
+
count= 0;
- for (ulint ibuf_mode= ibuf ? BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE;
- new_low != new_high_1; ++new_low)
+ for (; new_low != new_high_1; ++new_low)
{
- if (ibuf_bitmap_page(new_low, zip_size))
- continue;
if (space->is_stopping())
break;
+ buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(new_low.fold());
space->reacquire();
- if (buf_read_page_low(space, false, ibuf_mode, new_low, zip_size, false) ==
+ if (buf_read_page_low(new_low, zip_size, chain, space, block) ==
DB_SUCCESS)
+ {
count++;
+ ut_ad(!block);
+ if (!zip_size && !(block= buf_read_acquire()))
+ break;
+ }
}
if (count)
@@ -649,6 +618,7 @@ failed:
}
space->release();
+ buf_read_release(block);
return count;
}
@@ -671,7 +641,8 @@ void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos)
return;
}
- const ulint zip_size = space->zip_size();
+ const ulint zip_size = space->zip_size() | 1;
+ buf_block_t* block = buf_LRU_get_free_block(have_no_mutex);
for (ulint i = 0; i < page_nos.size(); i++) {
@@ -701,10 +672,16 @@ void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos)
}
}
+ buf_pool_t::hash_chain& chain =
+ buf_pool.page_hash.cell_get(cur_page_id.fold());
space->reacquire();
- switch (buf_read_page_low(space, false, BUF_READ_ANY_PAGE,
- cur_page_id, zip_size, true)) {
- case DB_SUCCESS: case DB_SUCCESS_LOCKED_REC:
+ switch (buf_read_page_low(cur_page_id, zip_size, chain, space,
+ block)) {
+ case DB_SUCCESS:
+ ut_ad(!block);
+ block = buf_LRU_get_free_block(have_no_mutex);
+ break;
+ case DB_SUCCESS_LOCKED_REC:
break;
default:
sql_print_error("InnoDB: Recovery failed to read page "
@@ -712,10 +689,12 @@ void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos)
cur_page_id.page_no(),
space->chain.start->name);
}
+ ut_ad(block);
}
-
- DBUG_PRINT("ib_buf", ("recovery read (%zu pages) for %s",
+ DBUG_PRINT("ib_buf", ("recovery read (%zu pages) for %s",
page_nos.size(), space->chain.start->name));
space->release();
+
+ buf_read_release(block);
}
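
Two conventions recur through the buf0rea.cc hunks above: buf_read_page_low() now receives a preallocated block (buf_read_acquire()/buf_read_release()) instead of allocating one itself, and callers smuggle an "in recovery" flag through bit 0 of zip_size, as in buf_read_recv_pages()'s zip_size() | 1 and the & ~1 masking in buf_page_init_for_read(). A tiny standalone illustration of that low-bit encoding; the numbers and names are this sketch's, not the server's:

#include <cassert>

// zip_size is either 0 or a multiple of 1024, so bit 0 is always free and can
// carry an unrelated flag for one call chain, mirroring the masking above
// (zip_size | 1, zip_size & ~1, zip_size > 1).
static unsigned with_recovery_flag(unsigned zip_size) {
  assert(zip_size % 1024 == 0);
  return zip_size | 1;               // what buf_read_recv_pages() does
}

int main() {
  unsigned z = with_recovery_flag(8192);
  bool in_recovery = z & 1;          // read the flag
  bool compressed  = z > 1;          // 0 and 0|1 both mean "not compressed"
  unsigned real_size = z & ~1u;      // strip the flag before using the size
  assert(in_recovery && compressed && real_size == 8192);
  return 0;
}
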
diff --git a/storage/innobase/data/data0type.cc b/storage/innobase/data/data0type.cc
index b1952bcc2a4..dc1c4b9a04a 100644
--- a/storage/innobase/data/data0type.cc
+++ b/storage/innobase/data/data0type.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,13 +33,6 @@ const byte reset_trx_id[DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN] = {
0x80, 0, 0, 0, 0, 0, 0
};
-/* At the database startup we store the default-charset collation number of
-this MySQL installation to this global variable. If we have < 4.1.2 format
-column definitions, or records in the insert buffer, we use this
-charset-collation code for them. */
-
-ulint data_mysql_default_charset_coll;
-
/*********************************************************************//**
Determine how many bytes the first n characters of the given string occupy.
If the string is shorter than n characters, returns the number of bytes
diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc
index 5516bce920b..316d0f01322 100644
--- a/storage/innobase/dict/dict0boot.cc
+++ b/storage/innobase/dict/dict0boot.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2022, MariaDB Corporation.
+Copyright (c) 2016, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -30,7 +30,6 @@ Created 4/18/1996 Heikki Tuuri
#include "dict0load.h"
#include "trx0trx.h"
#include "srv0srv.h"
-#include "ibuf0ibuf.h"
#include "buf0flu.h"
#include "log0recv.h"
#include "os0file.h"
@@ -94,18 +93,6 @@ dict_hdr_get_new_id(
mtr.commit();
}
-/** Update dict_sys.row_id in the dictionary header file page. */
-void dict_hdr_flush_row_id(row_id_t id)
-{
- mtr_t mtr;
- mtr.start();
- buf_block_t* d= dict_hdr_get(&mtr);
- byte *row_id= DICT_HDR + DICT_HDR_ROW_ID + d->page.frame;
- if (mach_read_from_8(row_id) < id)
- mtr.write<8>(*d, row_id, id);
- mtr.commit();
-}
-
/** Create the DICT_HDR page on database initialization.
@return error code */
dberr_t dict_create()
@@ -127,10 +114,8 @@ dberr_t dict_create()
}
ut_a(d->page.id() == hdr_page_id);
- /* Start counting row, table, index, and tree ids from
+ /* Start counting table, index, and tree ids from
DICT_HDR_FIRST_ID */
- mtr.write<8>(*d, DICT_HDR + DICT_HDR_ROW_ID + d->page.frame,
- DICT_HDR_FIRST_ID);
mtr.write<8>(*d, DICT_HDR + DICT_HDR_TABLE_ID + d->page.frame,
DICT_HDR_FIRST_ID);
mtr.write<8>(*d, DICT_HDR + DICT_HDR_INDEX_ID + d->page.frame,
@@ -233,12 +218,12 @@ dberr_t dict_boot()
dict_sys.create();
dberr_t err;
- const buf_block_t *d = buf_page_get_gen(hdr_page_id, 0, RW_X_LATCH,
+ const buf_block_t *d = buf_page_get_gen(hdr_page_id, 0, RW_S_LATCH,
nullptr, BUF_GET, &mtr, &err);
- if (!d) {
+ if (!d) {
mtr.commit();
return err;
- }
+ }
heap = mem_heap_create(450);
@@ -246,17 +231,6 @@ dberr_t dict_boot()
const byte* dict_hdr = &d->page.frame[DICT_HDR];
- /* Because we only write new row ids to disk-based data structure
- (dictionary header) when it is divisible by
- DICT_HDR_ROW_ID_WRITE_MARGIN, in recovery we will not recover
- the latest value of the row id counter. Therefore we advance
- the counter at the database startup to avoid overlapping values.
- Note that when a user after database startup first time asks for
- a new row id, then because the counter is now divisible by
- ..._MARGIN, it will immediately be updated to the disk-based
- header. */
-
- dict_sys.recover_row_id(mach_read_from_8(dict_hdr + DICT_HDR_ROW_ID));
if (uint32_t max_space_id
= mach_read_from_4(dict_hdr + DICT_HDR_MAX_SPACE_ID)) {
max_space_id--;
@@ -420,10 +394,7 @@ dberr_t dict_boot()
mtr.commit();
- err = ibuf_init_at_db_start();
-
- if (err == DB_SUCCESS || srv_force_recovery >= SRV_FORCE_NO_DDL_UNDO) {
- err = DB_SUCCESS;
+ if (err == DB_SUCCESS) {
/* Load definitions of other indexes on system tables */
dict_load_sys_table(dict_sys.sys_tables);
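
dict_hdr_flush_row_id() and the DICT_HDR_ROW_ID initialisation disappear because row ids are no longer a single persisted counter (the dict0dict.cc hunk now starts each table at table->row_id = 0). The deleted startup comment described the write-margin trick those functions implemented: persist the counter only every N-th allocation and, after a restart, resume beyond the stored value plus the margin so no id can ever be handed out twice. A standalone sketch of that pattern, with an invented margin and an in-memory stand-in for the header page:

#include <cstdint>

// Write-margin persisted counter: flush() stands in for the mini-transaction
// that wrote DICT_HDR_ROW_ID; MARGIN plays the role of the write margin.
class persisted_counter {
  static constexpr uint64_t MARGIN = 256;
  uint64_t next = 0;       // next id to hand out
  uint64_t durable = 0;    // what the header page on "disk" stores

  void flush(uint64_t v) { durable = v; }

public:
  // Startup: only multiples of MARGIN are ever flushed, and every id issued
  // before a crash was below stored + MARGIN, so resuming at stored + MARGIN
  // can never reissue one.  The result is again a multiple of MARGIN, so the
  // first allocation after startup immediately persists the new value.
  void recover(uint64_t stored) { next = stored + MARGIN; }

  uint64_t allocate() {
    if (next % MARGIN == 0)
      flush(next);         // amortized: one page write per MARGIN allocations
    return next++;
  }
};
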
diff --git a/storage/innobase/dict/dict0defrag_bg.cc b/storage/innobase/dict/dict0defrag_bg.cc
index bec6da8e6af..b688f3970fc 100644
--- a/storage/innobase/dict/dict0defrag_bg.cc
+++ b/storage/innobase/dict/dict0defrag_bg.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2016, 2022, MariaDB Corporation.
+Copyright (c) 2016, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -214,9 +214,6 @@ Save defragmentation result.
@return DB_SUCCESS or error code */
dberr_t dict_stats_save_defrag_summary(dict_index_t *index, THD *thd)
{
- if (index->is_ibuf())
- return DB_SUCCESS;
-
MDL_ticket *mdl_table= nullptr, *mdl_index= nullptr;
dict_table_t *table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
DICT_ERR_IGNORE_NONE);
@@ -336,8 +333,6 @@ dict_stats_save_defrag_stats(
/*============================*/
dict_index_t* index) /*!< in: index */
{
- if (index->is_ibuf())
- return DB_SUCCESS;
if (!index->is_readable())
return dict_stats_report_error(index->table, true);
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index d2fa8555e43..6df8ee0699a 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1180,6 +1180,7 @@ inline void dict_sys_t::add(dict_table_t* table)
ulint fold = my_crc32c(0, table->name.m_name,
strlen(table->name.m_name));
+ table->row_id = 0;
table->autoinc_mutex.init();
table->lock_mutex_init();
@@ -1999,7 +2000,6 @@ dict_index_add_to_cache(
ut_ad(index->n_def == index->n_fields);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(!dict_index_is_online_ddl(index));
- ut_ad(!dict_index_is_ibuf(index));
ut_d(mem_heap_validate(index->heap));
ut_a(!dict_index_is_clust(index)
@@ -2381,15 +2381,7 @@ dict_index_copy_types(
ulint n_fields) /*!< in: number of
field types to copy */
{
- ulint i;
-
- if (dict_index_is_ibuf(index)) {
- dtuple_set_types_binary(tuple, n_fields);
-
- return;
- }
-
- for (i = 0; i < n_fields; i++) {
+ for (ulint i = 0; i < n_fields; i++) {
const dict_field_t* ifield;
dtype_t* dfield_type;
@@ -2628,17 +2620,14 @@ dict_index_build_internal_non_clust(
ulint i;
ibool* indexed;
- ut_ad(table && index);
- ut_ad(!dict_index_is_clust(index));
- ut_ad(!dict_index_is_ibuf(index));
+ ut_ad(!index->is_primary());
ut_ad(dict_sys.locked());
/* The clustered index should be the first in the list of indexes */
clust_index = UT_LIST_GET_FIRST(table->indexes);
ut_ad(clust_index);
- ut_ad(dict_index_is_clust(clust_index));
- ut_ad(!dict_index_is_ibuf(clust_index));
+ ut_ad(clust_index->is_clust());
/* Create a new index */
new_index = dict_mem_index_create(
@@ -3769,24 +3758,7 @@ dict_index_build_node_ptr(
dtuple_t* tuple;
dfield_t* field;
byte* buf;
- ulint n_unique;
-
- if (dict_index_is_ibuf(index)) {
- /* In a universal index tree, we take the whole record as
- the node pointer if the record is on the leaf level,
- on non-leaf levels we remove the last field, which
- contains the page number of the child page */
-
- ut_a(!dict_table_is_comp(index->table));
- n_unique = rec_get_n_fields_old(rec);
-
- if (level > 0) {
- ut_a(n_unique > 1);
- n_unique--;
- }
- } else {
- n_unique = dict_index_get_n_unique_in_tree_nonleaf(index);
- }
+ ulint n_unique = dict_index_get_n_unique_in_tree_nonleaf(index);
tuple = dtuple_create(heap, n_unique + 1);
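
The branch deleted from dict_index_build_node_ptr() was the last trace of the change buffer's "universal" tree, where a whole record doubled as a node pointer. For every remaining index the node pointer has one uniform shape: the first n-unique-in-tree fields of the child page's lowest record followed by the child page number, which is what the surviving code builds. A rough standalone sketch of that shape; the types and helper below are invented:

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// A node pointer stored in a non-leaf B-tree page: a prefix of the child's
// smallest record plus the child's page number.
struct node_ptr {
  std::vector<std::string> key_prefix;  // stands in for the dtuple fields
  uint32_t child_page_no;
};

static node_ptr build_node_ptr(const std::vector<std::string> &child_low_rec,
                               size_t n_unique_in_tree,
                               uint32_t child_page_no) {
  node_ptr p;
  const size_t n = std::min(n_unique_in_tree, child_low_rec.size());
  p.key_prefix.assign(child_low_rec.begin(), child_low_rec.begin() + n);
  p.child_page_no = child_page_no;
  return p;
}
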
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index bd3bd71544a..9d5568f965b 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -865,9 +865,7 @@ err_exit:
return READ_OK;
}
-/** Check each tablespace found in the data dictionary.
-Then look at each table defined in SYS_TABLES that has a space_id > 0
-to find all the file-per-table tablespaces.
+/** Open each tablespace found in the data dictionary.
In a crash recovery we already have some tablespace objects created from
processing the REDO log. We will compare the
@@ -876,14 +874,12 @@ tablespace file. In addition, more validation will be done if recovery
was needed and force_recovery is not set.
We also scan the biggest space id, and store it to fil_system. */
-void dict_check_tablespaces_and_store_max_id()
+void dict_load_tablespaces()
{
uint32_t max_space_id = 0;
btr_pcur_t pcur;
mtr_t mtr;
- DBUG_ENTER("dict_check_tablespaces_and_store_max_id");
-
mtr.start();
dict_sys.lock(SRW_LOCK_CALL);
@@ -976,8 +972,6 @@ void dict_check_tablespaces_and_store_max_id()
fil_set_max_space_id_if_bigger(max_space_id);
dict_sys.unlock();
-
- DBUG_VOID_RETURN;
}
/** Error message for a delete-marked record in dict_load_column_low() */
@@ -1125,7 +1119,7 @@ err_len:
prtype = dtype_form_prtype(
prtype,
- data_mysql_default_charset_coll);
+ default_charset_info->number);
}
}
@@ -2475,9 +2469,7 @@ corrupted:
goto corrupted;
}
- if (table->supports_instant()) {
- err = btr_cur_instant_init(table);
- }
+ err = btr_cur_instant_init(table);
}
} else {
ut_ad(ignore_err & DICT_ERR_IGNORE_INDEX);
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index 39f5943d5a4..04b1ec88ac3 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -584,8 +584,6 @@ dict_stats_table_clone_create(
continue;
}
- ut_ad(!dict_index_is_ibuf(index));
-
ulint n_uniq = dict_index_get_n_unique(index);
heap_size += sizeof(dict_index_t);
@@ -634,8 +632,6 @@ dict_stats_table_clone_create(
continue;
}
- ut_ad(!dict_index_is_ibuf(index));
-
dict_index_t* idx;
idx = (dict_index_t*) mem_heap_zalloc(heap, sizeof(*idx));
@@ -714,7 +710,6 @@ dict_stats_empty_index(
/*!< in: whether to empty defrag stats */
{
ut_ad(!(index->type & DICT_FTS));
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(index->table->stats_mutex_is_owner());
ulint n_uniq = index->n_uniq;
@@ -767,8 +762,6 @@ dict_stats_empty_table(
continue;
}
- ut_ad(!dict_index_is_ibuf(index));
-
dict_stats_empty_index(index, empty_defrag_stats);
}
@@ -901,8 +894,6 @@ dict_stats_copy(
}
}
- ut_ad(!dict_index_is_ibuf(dst_idx));
-
if (!INDEX_EQ(src_idx, dst_idx)) {
for (src_idx = dict_table_get_first_index(src);
src_idx != NULL;
@@ -1094,11 +1085,10 @@ btr_cur_t::open_random_leaf(rec_offs *&offsets, mem_heap_t *&heap, mtr_t &mtr)
dberr_t err;
auto offset= index()->page;
- bool merge= false;
ulint height= ULINT_UNDEFINED;
while (buf_block_t *block=
- btr_block_get(*index(), offset, RW_S_LATCH, merge, &mtr, &err))
+ btr_block_get(*index(), offset, RW_S_LATCH, &mtr, &err))
{
page_cur.block= block;
@@ -1120,8 +1110,7 @@ btr_cur_t::open_random_leaf(rec_offs *&offsets, mem_heap_t *&heap, mtr_t &mtr)
return DB_SUCCESS;
}
- if (!--height)
- merge= !index()->is_clust();
+ height--;
page_cur_open_on_rnd_user_rec(&page_cur);
@@ -1462,10 +1451,6 @@ dummy_empty:
dict_stats_empty_index(index, false);
index->table->stats_mutex_unlock();
return err;
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
- } else if (ibuf_debug && !dict_index_is_clust(index)) {
- goto dummy_empty;
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
} else if (dict_index_is_online_ddl(index) || !index->is_committed()
|| !index->table->space) {
goto dummy_empty;
@@ -1571,9 +1556,6 @@ empty_table:
}
for (; index != NULL; index = dict_table_get_next_index(index)) {
-
- ut_ad(!dict_index_is_ibuf(index));
-
if (!index->is_btree()) {
continue;
}
@@ -1638,9 +1620,7 @@ static dberr_t page_cur_open_level(page_cur_t *page_cur, ulint level,
for (ulint height = ULINT_UNDEFINED;; height--)
{
- buf_block_t* block=
- btr_block_get(*index, page, RW_S_LATCH,
- !height && !index->is_clust(), mtr, &err);
+ buf_block_t* block= btr_block_get(*index, page, RW_S_LATCH, mtr, &err);
if (!block)
break;
@@ -2258,9 +2238,7 @@ dict_stats_analyze_index_below_cur(
block = buf_page_get_gen(page_id, zip_size,
RW_S_LATCH, NULL, BUF_GET,
- &mtr, &err,
- !index->is_clust()
- && 1 == btr_page_get_level(page));
+ &mtr, &err);
if (!block) {
goto func_exit;
}
@@ -2999,7 +2977,6 @@ dict_stats_update_persistent(
return(DB_CORRUPTION);
}
- ut_ad(!dict_index_is_ibuf(index));
table->stats_mutex_lock();
dict_stats_empty_index(index, false);
table->stats_mutex_unlock();
@@ -3380,8 +3357,6 @@ unlocked_free_and_exit:
continue;
}
- ut_ad(!dict_index_is_ibuf(index));
-
for (unsigned i = 0; i < index->n_uniq; i++) {
char stat_name[16];
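btr_cur_t::open_random_leaf() above no longer asks btr_block_get() to trigger a change-buffer merge on the level above the leaves; it simply walks one randomly chosen child per level until a leaf is reached. A toy model of that descent over an in-memory tree (all names are illustrative, not InnoDB's):

#include <iostream>
#include <random>
#include <vector>

// A toy B-tree node: either it has children (internal) or values (leaf).
struct Node {
  std::vector<Node> children;
  std::vector<int>  values;
  bool is_leaf() const { return children.empty(); }
};

// Descend from the root, picking a uniformly random child at each level,
// and return the leaf that was reached.
const Node* open_random_leaf(const Node& root, std::mt19937& rng)
{
  const Node* node = &root;
  while (!node->is_leaf()) {
    std::uniform_int_distribution<size_t> pick(0, node->children.size() - 1);
    node = &node->children[pick(rng)];
  }
  return node;
}

int main()
{
  Node leaf1{{}, {1, 2}}, leaf2{{}, {3}}, leaf3{{}, {4, 5, 6}};
  Node root{{leaf1, leaf2, leaf3}, {}};
  std::mt19937 rng(42);
  std::cout << "random leaf holds "
            << open_random_leaf(root, rng)->values.size() << " values\n";
}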
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index a3d9df4af1b..c1fd916be55 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2021, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2022, MariaDB Corporation.
+Copyright (c) 2014, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -45,7 +45,6 @@ Created 10/25/1995 Heikki Tuuri
#include "srv0start.h"
#include "trx0purge.h"
#include "buf0lru.h"
-#include "ibuf0ibuf.h"
#include "buf0flu.h"
#include "log.h"
#ifdef __linux__
@@ -496,6 +495,9 @@ void fil_space_t::flush_low()
break;
}
+ if (fil_system.is_write_through())
+ goto skip_flush;
+
fil_n_pending_tablespace_flushes++;
for (fil_node_t *node= UT_LIST_GET_FIRST(chain); node;
node= UT_LIST_GET_NEXT(chain, node))
@@ -520,8 +522,9 @@ void fil_space_t::flush_low()
mysql_mutex_unlock(&fil_system.mutex);
}
- clear_flush();
fil_n_pending_tablespace_flushes--;
+skip_flush:
+ clear_flush();
}
/** Try to extend a tablespace.
@@ -750,7 +753,6 @@ inline pfs_os_file_t fil_node_t::close_to_free(bool detach_handle)
{
if (space->is_in_unflushed_spaces)
{
- ut_ad(srv_file_flush_method != SRV_O_DIRECT_NO_FSYNC);
space->is_in_unflushed_spaces= false;
fil_system.unflushed_spaces.remove(*space);
}
@@ -783,7 +785,6 @@ pfs_os_file_t fil_system_t::detach(fil_space_t *space, bool detach_handle)
if (space->is_in_unflushed_spaces)
{
- ut_ad(srv_file_flush_method != SRV_O_DIRECT_NO_FSYNC);
space->is_in_unflushed_spaces= false;
unflushed_spaces.remove(*space);
}
@@ -1341,6 +1342,120 @@ ATTRIBUTE_COLD void fil_system_t::extend_to_recv_size()
mysql_mutex_unlock(&mutex);
}
+ATTRIBUTE_COLD void fil_space_t::reopen_all()
+{
+ mysql_mutex_assert_owner(&fil_system.mutex);
+ fil_system.freeze_space_list++;
+
+ for (fil_space_t &space : fil_system.space_list)
+ {
+ for (fil_node_t *node= UT_LIST_GET_FIRST(space.chain); node;
+ node= UT_LIST_GET_NEXT(chain, node))
+ if (node->is_open())
+ goto need_to_close;
+ continue;
+
+ need_to_close:
+ uint32_t p= space.n_pending.fetch_or(CLOSING, std::memory_order_acquire);
+ if (p & (STOPPING | CLOSING))
+ continue;
+
+ for (fil_node_t *node= UT_LIST_GET_FIRST(space.chain); node;
+ node= UT_LIST_GET_NEXT(chain, node))
+ {
+ if (!node->is_open())
+ continue;
+
+ ulint type= OS_DATA_FILE;
+
+ switch (FSP_FLAGS_GET_ZIP_SSIZE(space.flags)) {
+ case 1: case 2:
+ type= OS_DATA_FILE_NO_O_DIRECT;
+ }
+
+ for (ulint count= 10000; count--;)
+ {
+ p= space.pending();
+
+ if (!(p & CLOSING) || (p & STOPPING))
+ break;
+
+ if (!(p & PENDING) && !node->being_extended)
+ {
+ space.reacquire();
+ mysql_mutex_unlock(&fil_system.mutex);
+ /* Unconditionally flush the file, because
+ fil_system.write_through was updated prematurely,
+ potentially causing some flushes to be lost. */
+ os_file_flush(node->handle);
+ mysql_mutex_lock(&fil_system.mutex);
+ p= space.n_pending.fetch_sub(1, std::memory_order_relaxed) - 1;
+
+ if (!(p & CLOSING) || (p & STOPPING))
+ break;
+
+ if (!(p & PENDING) && !node->being_extended)
+ {
+ ut_a(os_file_close(node->handle));
+ bool success;
+ node->handle= os_file_create(innodb_data_file_key, node->name,
+ node->is_raw_disk
+ ? OS_FILE_OPEN_RAW : OS_FILE_OPEN,
+ OS_FILE_AIO, type,
+ srv_read_only_mode, &success);
+ ut_a(success);
+ goto next_file;
+ }
+ }
+
+ space.reacquire();
+ mysql_mutex_unlock(&fil_system.mutex);
+ std::this_thread::sleep_for(std::chrono::microseconds(100));
+ mysql_mutex_lock(&fil_system.mutex);
+ space.release();
+
+ if (!node->is_open())
+ goto next_file;
+ }
+
+ if (!(p & CLOSING) || (p & STOPPING))
+ next_file:
+ continue;
+
+ sql_print_error("InnoDB: Failed to reopen file '%s' due to " UINT32PF
+ " operations", node->name, p & PENDING);
+ }
+ }
+
+ fil_system.freeze_space_list--;
+}
+
+void fil_system_t::set_write_through(bool write_through)
+{
+ mysql_mutex_lock(&mutex);
+
+ if (write_through != is_write_through())
+ {
+ this->write_through= write_through;
+ fil_space_t::reopen_all();
+ }
+
+ mysql_mutex_unlock(&mutex);
+}
+
+void fil_system_t::set_buffered(bool buffered)
+{
+ mysql_mutex_lock(&mutex);
+
+ if (buffered != is_buffered())
+ {
+ this->buffered= buffered;
+ fil_space_t::reopen_all();
+ }
+
+ mysql_mutex_unlock(&mutex);
+}
+
/** Close all tablespace files at shutdown */
void fil_space_t::close_all()
{
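fil_space_t::reopen_all() added above announces the close by setting a CLOSING bit in the space's pending-operations word with fetch_or, then waits for the PENDING count to drain before closing and reopening each file with the new flags. A condensed standalone model of that counter protocol; the bit layout and class here are assumptions for illustration, not the actual fil_space_t encoding:

#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

// A pending-operations word: low bits count in-flight I/Os, a high bit marks "closing".
class PendingOps {
  static constexpr uint32_t CLOSING = 1u << 31;
  static constexpr uint32_t PENDING = CLOSING - 1;
  std::atomic<uint32_t> word{0};
public:
  bool begin_io()            // acquire a reference unless the file is being closed
  {
    uint32_t w = word.fetch_add(1, std::memory_order_acquire);
    if (w & CLOSING) { word.fetch_sub(1, std::memory_order_relaxed); return false; }
    return true;
  }
  void end_io() { word.fetch_sub(1, std::memory_order_release); }

  void close_and_reopen()    // announce closing, then wait until no I/O is pending
  {
    word.fetch_or(CLOSING, std::memory_order_acquire);
    while (word.load(std::memory_order_acquire) & PENDING)
      std::this_thread::sleep_for(std::chrono::microseconds(100));
    std::cout << "file reopened with new flags\n";
    word.fetch_and(~CLOSING, std::memory_order_release);
  }
};

int main()
{
  PendingOps ops;
  std::thread writer([&] {
    if (ops.begin_io()) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));  // simulated write
      ops.end_io();
    }
  });
  ops.close_and_reopen();
  writer.join();
}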
@@ -1361,12 +1476,9 @@ void fil_space_t::close_all()
for (fil_node_t *node= UT_LIST_GET_FIRST(space.chain); node != NULL;
node= UT_LIST_GET_NEXT(chain, node))
{
-
if (!node->is_open())
- {
next:
continue;
- }
for (ulint count= 10000; count--;)
{
@@ -1382,8 +1494,8 @@ void fil_space_t::close_all()
goto next;
}
- ib::error() << "File '" << node->name << "' has " << space.referenced()
- << " operations";
+ sql_print_error("InnoDB: File '%s' has " UINT32PF " operations",
+ node->name, space.referenced());
}
fil_system.detach(&space);
@@ -1626,7 +1738,6 @@ pfs_os_file_t fil_delete_tablespace(uint32_t id)
fil_space_free_low(space);
}
- ibuf_delete_for_discarded_space(id);
return handle;
}
@@ -2619,7 +2730,7 @@ inline void fil_node_t::complete_write()
mysql_mutex_assert_not_owner(&fil_system.mutex);
if (space->purpose != FIL_TYPE_TEMPORARY &&
- srv_file_flush_method != SRV_O_DIRECT_NO_FSYNC &&
+ (!fil_system.is_write_through() && !my_disable_sync) &&
space->set_needs_flush())
{
mysql_mutex_lock(&fil_system.mutex);
@@ -2767,10 +2878,6 @@ write_completed:
{
ut_ad(request.is_read());
- /* IMPORTANT: since i/o handling for reads will read also the insert
- buffer in fil_system.sys_space, we have to be very careful not to
- introduce deadlocks. We never close fil_system.sys_space data
- files and never issue asynchronous reads of change buffer pages. */
const page_id_t id(request.bpage->id());
if (dberr_t err= request.bpage->read_complete(*request.node))
@@ -2795,14 +2902,6 @@ write_completed:
possibly cached by the OS. */
void fil_flush_file_spaces()
{
- if (srv_file_flush_method == SRV_O_DIRECT_NO_FSYNC)
- {
- ut_d(mysql_mutex_lock(&fil_system.mutex));
- ut_ad(fil_system.unflushed_spaces.empty());
- ut_d(mysql_mutex_unlock(&fil_system.mutex));
- return;
- }
-
rescan:
mysql_mutex_lock(&fil_system.mutex);
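With fil_system.write_through in place, flush_low() skips the fsync loop when every write already reaches stable storage, and fil_flush_file_spaces() no longer needs the O_DIRECT_NO_FSYNC early return. A small sketch of that decision, using invented names rather than the fil_system API:

#include <iostream>

// Hypothetical file abstraction: fsync() is only needed when writes are buffered
// and not already write-through.
struct DataFile {
  bool write_through;   // writes reach stable storage immediately (e.g. O_DSYNC)
  bool needs_flush;     // at least one write since the last flush

  void write()
  {
    // ...perform the write...
    if (!write_through)
      needs_flush = true;          // remember that an fsync is still owed
  }

  void flush()
  {
    if (write_through || !needs_flush)
      return;                      // nothing to do: data is already durable
    // a real implementation would fsync the file descriptor here
    needs_flush = false;
    std::cout << "fsync issued\n";
  }
};

int main()
{
  DataFile buffered{false, false}, direct{true, false};
  buffered.write(); direct.write();
  buffered.flush();   // prints "fsync issued"
  direct.flush();     // no-op: write-through made the write durable already
}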
diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc
index b6971558201..bdc08b22f3a 100644
--- a/storage/innobase/fil/fil0pagecompress.cc
+++ b/storage/innobase/fil/fil0pagecompress.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (C) 2013, 2021, MariaDB Corporation.
+Copyright (C) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -47,7 +47,6 @@ Updated 14/02/2015
#include "trx0sys.h"
#include "row0mysql.h"
#include "buf0lru.h"
-#include "ibuf0ibuf.h"
#include "zlib.h"
#ifdef __linux__
#include <linux/fs.h>
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index c6044b201fe..f7625974886 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -33,7 +33,6 @@ Created 11/29/1995 Heikki Tuuri
#include "page0page.h"
#include "srv0srv.h"
#include "srv0start.h"
-#include "ibuf0ibuf.h"
#include "btr0btr.h"
#include "btr0sea.h"
#include "dict0boot.h"
@@ -507,7 +506,7 @@ dberr_t fsp_header_init(fil_space_t *space, uint32_t size, mtr_t *mtr)
const page_id_t page_id(space->id, 0);
const ulint zip_size = space->zip_size();
- buf_block_t *free_block = buf_LRU_get_free_block(false);
+ buf_block_t *free_block = buf_LRU_get_free_block(have_no_mutex);
mtr->x_lock_space(space);
@@ -841,9 +840,9 @@ fsp_fill_free_list(
if (i)
{
- buf_block_t *f= buf_LRU_get_free_block(false);
+ buf_block_t *f= buf_LRU_get_free_block(have_no_mutex);
buf_block_t *block= buf_page_create(space, static_cast<uint32_t>(i),
- zip_size, mtr, f);
+ zip_size, mtr, f);
if (UNIV_UNLIKELY(block != f))
buf_pool.free_block(f);
fsp_init_file_page(space, block, mtr);
@@ -853,13 +852,19 @@ fsp_fill_free_list(
if (space->purpose != FIL_TYPE_TEMPORARY)
{
- buf_block_t *f= buf_LRU_get_free_block(false);
+ buf_block_t *f= buf_LRU_get_free_block(have_no_mutex);
buf_block_t *block=
- buf_page_create(space,
- static_cast<uint32_t>(i + FSP_IBUF_BITMAP_OFFSET),
+ buf_page_create(space, static_cast<uint32_t>(i + 1),
zip_size, mtr, f);
if (UNIV_UNLIKELY(block != f))
buf_pool.free_block(f);
+ /* The zero-initialization will reset the change buffer bitmap bits
+ to safe values for possible import to an earlier version that
+ supports change buffering:
+
+ IBUF_BITMAP_FREE = 0 (no space left for buffering inserts)
+ IBUF_BITMAP_BUFFERED = 0 (no changes have been buffered)
+ IBUF_BITMAP_IBUF = 0 (not part of the change buffer) */
fsp_init_file_page(space, block, mtr);
mtr->write<2>(*block, FIL_PAGE_TYPE + block->page.frame,
FIL_PAGE_IBUF_BITMAP);
@@ -884,9 +889,9 @@ fsp_fill_free_list(
if (UNIV_UNLIKELY(init_xdes))
{
/* The first page in the extent is a descriptor page and the
- second is an ibuf bitmap page: mark them used */
+ second was reserved for change buffer bitmap: mark them used */
xdes_set_free<false>(*xdes, descr, 0, mtr);
- xdes_set_free<false>(*xdes, descr, FSP_IBUF_BITMAP_OFFSET, mtr);
+ xdes_set_free<false>(*xdes, descr, 1, mtr);
xdes_set_state(*xdes, descr, XDES_FREE_FRAG, mtr);
if (dberr_t err= flst_add_last(header, FSP_HEADER_OFFSET + FSP_FREE_FRAG,
xdes, xoffset, mtr))
@@ -1055,7 +1060,7 @@ fsp_page_create(fil_space_t *space, page_no_t offset, mtr_t *mtr)
}
}
- free_block= buf_LRU_get_free_block(false);
+ free_block= buf_LRU_get_free_block(have_no_mutex);
got_free_block:
block= buf_page_create(space, static_cast<uint32_t>(offset),
space->zip_size(), mtr, free_block);
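The fsp_fill_free_list() change keeps allocating and zero-filling the page that older servers used for the change buffer bitmap, because all-zero bits read back as "nothing buffered, no space reserved, not an ibuf page" on import into such a version. A toy illustration of why zero bytes are a safe default for this kind of status bitmap (the bit assignments here are invented, not the on-disk format):

#include <array>
#include <cstdint>
#include <iostream>

// Toy bitmap: a few status bits per tracked page, packed into bytes.
struct BitmapPage {
  std::array<uint8_t, 16> bytes{};   // value-initialized: every bit is 0

  // Bit 0 of each byte: "changes have been buffered for this page".
  bool changes_buffered(size_t page) const { return bytes[page] & 1; }
  // Bit 1: "space is available for buffering more changes".
  bool space_for_buffering(size_t page) const { return bytes[page] & 2; }
};

int main()
{
  BitmapPage bitmap;                       // freshly initialized, all zero
  for (size_t p = 0; p < bitmap.bytes.size(); p++)
    if (bitmap.changes_buffered(p) || bitmap.space_for_buffering(p))
      std::cout << "unexpected stale state for page " << p << "\n";
  std::cout << "all-zero bitmap: nothing buffered, nothing to merge\n";
}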
diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc
index 83afd732b21..60218a132c9 100644
--- a/storage/innobase/gis/gis0rtree.cc
+++ b/storage/innobase/gis/gis0rtree.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2022, MariaDB Corporation.
+Copyright (c) 2018, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -34,7 +34,6 @@ Created 2013/03/27 Allen Lai and Jimmy Yang
#include "btr0pcur.h"
#include "rem0cmp.h"
#include "lock0lock.h"
-#include "ibuf0ibuf.h"
#include "trx0undo.h"
#include "srv0mon.h"
#include "gis0geo.h"
@@ -538,7 +537,7 @@ err_exit:
mem_heap_free(heap);
}
-MY_ATTRIBUTE((nonnull, warn_unused_result))
+MY_ATTRIBUTE((nonnull(1,3,4,5,6,8), warn_unused_result))
/**************************************************************//**
Update parent page's MBR and Predicate lock information during a split */
static
@@ -552,6 +551,7 @@ rtr_adjust_upper_level(
buf_block_t* new_block, /*!< in/out: the new half page */
rtr_mbr_t* mbr, /*!< in: MBR on the old page */
rtr_mbr_t* new_mbr, /*!< in: MBR on the new page */
+ que_thr_t* thr, /*!< in/out: query thread */
mtr_t* mtr) /*!< in: mtr */
{
ulint page_no;
@@ -570,7 +570,6 @@ rtr_adjust_upper_level(
/* Create a memory heap where the data tuple is stored */
heap = mem_heap_create(1024);
- cursor.thr = sea_cur->thr;
cursor.page_cur.index = sea_cur->index();
cursor.page_cur.block = block;
@@ -584,7 +583,8 @@ rtr_adjust_upper_level(
/* Set new mbr for the old page on the upper level. */
/* Look up the index for the node pointer to page */
- offsets = rtr_page_get_father_block(NULL, heap, mtr, sea_cur, &cursor);
+ offsets = rtr_page_get_father_block(nullptr, heap, sea_cur, &cursor,
+ thr, mtr);
page_cursor = btr_cur_get_page_cur(&cursor);
@@ -669,7 +669,7 @@ rtr_adjust_upper_level(
if (next_page_no == FIL_NULL) {
} else if (buf_block_t* next_block =
btr_block_get(*sea_cur->index(), next_page_no, RW_X_LATCH,
- false, mtr, &err)) {
+ mtr, &err)) {
if (UNIV_UNLIKELY(memcmp_aligned<4>(next_block->page.frame
+ FIL_PAGE_PREV,
block->page.frame
@@ -691,11 +691,6 @@ rtr_adjust_upper_level(
/*************************************************************//**
Moves record list to another page for rtree splitting.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return error code
@retval DB_FAIL on ROW_FORMAT=COMPRESSED compression failure */
static
@@ -731,8 +726,7 @@ rtr_split_page_move_rec_list(
ulint max_to_move = 0;
rtr_rec_move_t* rec_move = NULL;
- ut_ad(!dict_index_is_ibuf(index));
- ut_ad(dict_index_is_spatial(index));
+ ut_ad(index->is_spatial());
rec_offs_init(offsets_);
@@ -867,7 +861,8 @@ rtr_page_split_and_insert(
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr, /*!< in: mtr */
- dberr_t* err) /*!< out: error code */
+ dberr_t* err, /*!< out: error code */
+ que_thr_t* thr) /*!< in: query thread */
{
buf_block_t* block;
page_t* page;
@@ -895,6 +890,8 @@ rtr_page_split_and_insert(
int first_rec_group = 1;
IF_DBUG(bool iterated = false,);
+ buf_pool.pages_split++;
+
if (!*heap) {
*heap = mem_heap_create(1024);
}
@@ -1159,7 +1156,7 @@ after_insert:
/* Adjust the upper level. */
*err = rtr_adjust_upper_level(cursor, flags, block, new_block,
- &mbr, &new_mbr, mtr);
+ &mbr, &new_mbr, thr, mtr);
if (UNIV_UNLIKELY(*err != DB_SUCCESS)) {
return nullptr;
}
@@ -1179,13 +1176,6 @@ after_insert:
/* If the new res insert fail, we need to do another split
again. */
if (!rec) {
- /* We play safe and reset the free bits for new_page */
- if (!dict_index_is_clust(cursor->index())
- && !cursor->index()->table->is_temporary()) {
- ibuf_reset_free_bits(new_block);
- ibuf_reset_free_bits(block);
- }
-
/* We need to clean the parent path here and search father
node later, otherwise, it's possible that find a wrong
parent. */
@@ -1212,6 +1202,244 @@ after_insert:
return(rec);
}
+/*************************************************************//**
+Makes tree one level higher by splitting the root, and inserts the tuple.
+NOTE that the operation of this function must always succeed,
+we cannot reverse it: therefore enough free disk space must be
+guaranteed to be available before this function is called.
+@return inserted record */
+rec_t*
+rtr_root_raise_and_insert(
+/*======================*/
+ ulint flags, /*!< in: undo logging and locking flags */
+ btr_cur_t* cursor, /*!< in: cursor at which to insert: must be
+ on the root page; when the function returns,
+ the cursor is positioned on the predecessor
+ of the inserted record */
+ rec_offs** offsets,/*!< out: offsets on inserted record */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ const dtuple_t* tuple, /*!< in: tuple to insert */
+ ulint n_ext, /*!< in: number of externally stored columns */
+ mtr_t* mtr, /*!< in: mtr */
+ dberr_t* err, /*!< out: error code */
+ que_thr_t* thr) /*!< in: query thread */
+{
+ dict_index_t* index;
+ rec_t* rec;
+ dtuple_t* node_ptr;
+ ulint level;
+ rec_t* node_ptr_rec;
+ page_cur_t* page_cursor;
+ page_zip_des_t* root_page_zip;
+ page_zip_des_t* new_page_zip;
+ buf_block_t* root;
+ buf_block_t* new_block;
+
+ root = btr_cur_get_block(cursor);
+ root_page_zip = buf_block_get_page_zip(root);
+ ut_ad(!page_is_empty(root->page.frame));
+ index = btr_cur_get_index(cursor);
+ ut_ad(index->is_spatial());
+#ifdef UNIV_ZIP_DEBUG
+ ut_a(!root_page_zip
+ || page_zip_validate(root_page_zip, root->page.frame, index));
+#endif /* UNIV_ZIP_DEBUG */
+
+ const page_id_t root_id{root->page.id()};
+
+ ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
+ | MTR_MEMO_SX_LOCK));
+ ut_ad(mtr->memo_contains_flagged(root, MTR_MEMO_PAGE_X_FIX));
+
+ if (index->page != root_id.page_no()) {
+ ut_ad("corrupted root page number" == 0);
+ return nullptr;
+ }
+
+ if (!btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF,
+ *root, *index->table->space)
+ || !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP,
+ *root, *index->table->space)) {
+ return nullptr;
+ }
+
+ /* Allocate a new page to the tree. Root splitting is done by first
+ moving the root records to the new page, emptying the root, putting
+ a node pointer to the new page, and then splitting the new page. */
+
+ level = btr_page_get_level(root->page.frame);
+
+ new_block = btr_page_alloc(index, 0, FSP_NO_DIR, level, mtr, mtr, err);
+
+ if (!new_block) {
+ return nullptr;
+ }
+
+ new_page_zip = buf_block_get_page_zip(new_block);
+ ut_a(!new_page_zip == !root_page_zip);
+ ut_a(!new_page_zip
+ || page_zip_get_size(new_page_zip)
+ == page_zip_get_size(root_page_zip));
+
+ btr_page_create(new_block, new_page_zip, index, level, mtr);
+ if (page_has_siblings(new_block->page.frame)) {
+ compile_time_assert(FIL_PAGE_NEXT == FIL_PAGE_PREV + 4);
+ compile_time_assert(FIL_NULL == 0xffffffff);
+ static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
+ memset_aligned<8>(new_block->page.frame + FIL_PAGE_PREV,
+ 0xff, 8);
+ mtr->memset(new_block, FIL_PAGE_PREV, 8, 0xff);
+ if (UNIV_LIKELY_NULL(new_page_zip)) {
+ memset_aligned<8>(new_page_zip->data + FIL_PAGE_PREV,
+ 0xff, 8);
+ }
+ }
+
+ /* Copy the records from root to the new page one by one. */
+ dberr_t e;
+ if (!err) {
+ err = &e;
+ }
+
+ if (0
+#ifdef UNIV_ZIP_COPY
+ || new_page_zip
+#endif /* UNIV_ZIP_COPY */
+ || !page_copy_rec_list_end(new_block, root,
+ page_get_infimum_rec(root->page.frame),
+ index, mtr, err)) {
+ switch (*err) {
+ case DB_SUCCESS:
+ break;
+ case DB_FAIL:
+ *err = DB_SUCCESS;
+ break;
+ default:
+ return nullptr;
+ }
+
+ ut_a(new_page_zip);
+
+ /* Copy the page byte for byte. */
+ page_zip_copy_recs(new_block, root_page_zip,
+ root->page.frame, index, mtr);
+
+ /* Update the lock table and possible hash index. */
+ if (index->has_locking()) {
+ lock_move_rec_list_end(
+ new_block, root,
+ page_get_infimum_rec(root->page.frame));
+ }
+
+ /* Move any existing predicate locks */
+ lock_prdt_rec_move(new_block, root_id);
+ }
+
+ constexpr uint16_t max_trx_id = PAGE_HEADER + PAGE_MAX_TRX_ID;
+ if (!index->is_primary()) {
+ /* In secondary indexes,
+ PAGE_MAX_TRX_ID can be reset on the root page, because
+ the field only matters on leaf pages, and the root no
+ longer is a leaf page. (Older versions of InnoDB did
+ set PAGE_MAX_TRX_ID on all secondary index pages.) */
+ byte* p = my_assume_aligned<8>(
+ PAGE_HEADER + PAGE_MAX_TRX_ID + root->page.frame);
+ if (mach_read_from_8(p)) {
+ mtr->memset(root, max_trx_id, 8, 0);
+ if (UNIV_LIKELY_NULL(root->page.zip.data)) {
+ memset_aligned<8>(max_trx_id
+ + root->page.zip.data, 0, 8);
+ }
+ }
+ } else {
+ /* PAGE_ROOT_AUTO_INC is only present in the clustered index
+ root page; on other clustered index pages, we want to reserve
+ the field PAGE_MAX_TRX_ID for future use. */
+ byte* p = my_assume_aligned<8>(
+ PAGE_HEADER + PAGE_MAX_TRX_ID + new_block->page.frame);
+ if (mach_read_from_8(p)) {
+ mtr->memset(new_block, max_trx_id, 8, 0);
+ if (UNIV_LIKELY_NULL(new_block->page.zip.data)) {
+ memset_aligned<8>(max_trx_id
+ + new_block->page.zip.data,
+ 0, 8);
+ }
+ }
+ }
+
+ /* If this is a pessimistic insert which is actually done to
+ perform a pessimistic update then we have stored the lock
+ information of the record to be inserted on the infimum of the
+ root page: we cannot discard the lock structs on the root page */
+
+ if (index->has_locking()) {
+ lock_update_root_raise(*new_block, root_id);
+ }
+
+ /* Create a memory heap where the node pointer is stored */
+ if (!*heap) {
+ *heap = mem_heap_create(1000);
+ }
+
+ const uint32_t new_page_no = new_block->page.id().page_no();
+ rec = page_rec_get_next(page_get_infimum_rec(new_block->page.frame));
+ ut_ad(rec); /* We just created the page. */
+
+ /* Build the node pointer (= node key and page address) for the
+ child */
+ rtr_mbr_t new_mbr;
+ rtr_page_cal_mbr(index, new_block, &new_mbr, *heap);
+ node_ptr = rtr_index_build_node_ptr(index, &new_mbr, rec, new_page_no,
+ *heap);
+ /* The node pointer must be marked as the predefined minimum record,
+ as there is no lower alphabetical limit to records in the leftmost
+ node of a level: */
+ dtuple_set_info_bits(node_ptr,
+ dtuple_get_info_bits(node_ptr)
+ | REC_INFO_MIN_REC_FLAG);
+
+ /* Rebuild the root page to get free space */
+ btr_page_empty(root, root_page_zip, index, level + 1, mtr);
+ ut_ad(!page_has_siblings(root->page.frame));
+
+ page_cursor = btr_cur_get_page_cur(cursor);
+
+ /* Insert node pointer to the root */
+
+ page_cur_set_before_first(root, page_cursor);
+
+ node_ptr_rec = page_cur_tuple_insert(page_cursor, node_ptr,
+ offsets, heap, 0, mtr);
+
+ /* The root page should only contain the node pointer
+ to new_block at this point. Thus, the data should fit. */
+ ut_a(node_ptr_rec);
+
+ page_cursor->block = new_block;
+ page_cursor->index = index;
+
+ if (tuple) {
+ ut_ad(dtuple_check_typed(tuple));
+ /* Reposition the cursor to the child node */
+ ulint low_match = 0, up_match = 0;
+
+ if (page_cur_search_with_match(tuple, PAGE_CUR_LE,
+ &up_match, &low_match,
+ page_cursor, nullptr)) {
+ if (err) {
+ *err = DB_CORRUPTION;
+ }
+ return nullptr;
+ }
+ } else {
+ page_cursor->rec = page_get_infimum_rec(new_block->page.frame);
+ }
+
+ /* Split the child and insert tuple */
+ return rtr_page_split_and_insert(flags, cursor, offsets, heap,
+ tuple, n_ext, mtr, err, thr);
+}
+
/****************************************************************//**
Following the right link to find the proper block for insert.
@return the proper block.*/
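rtr_root_raise_and_insert() added above follows the classic root-raise recipe: move the root's contents to a freshly allocated page, empty the root, leave only a node pointer to the new page, and then split that page. A compact model of the reshaping step on an in-memory tree (types and names invented for illustration, not the InnoDB page layer):

#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Page {
  std::vector<int> records;                    // user records or node-pointer keys
  std::vector<std::unique_ptr<Page>> children; // empty for leaf pages
  int level = 0;
};

// Raise the tree by one level: the old root's contents move to a new child page,
// and the root keeps a single node pointer to that child.
Page* root_raise(Page& root)
{
  auto child = std::make_unique<Page>();
  child->level = root.level;
  child->records = std::move(root.records);        // move records to the new page
  child->children = std::move(root.children);

  root.records.clear();                            // empty the root
  root.children.clear();
  root.level++;                                    // root is now one level higher
  root.records.push_back(child->records.front());  // node pointer: first key of child
  root.children.push_back(std::move(child));
  return root.children.back().get();               // caller would now split this page
}

int main()
{
  Page root;
  root.records = {10, 20, 30, 40};
  Page* child = root_raise(root);
  std::cout << "root keeps " << root.records.size() << " node pointer, child holds "
            << child->records.size() << " records at level " << child->level << "\n";
}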
@@ -1240,6 +1468,7 @@ rtr_ins_enlarge_mbr(
/* Check path info is not empty. */
ut_ad(!btr_cur->rtr_info->parent_path->empty());
+ ut_ad(btr_cur->rtr_info->thr || !btr_cur->index()->is_committed());
/* Create a memory heap. */
heap = mem_heap_create(1024);
@@ -1265,7 +1494,8 @@ rtr_ins_enlarge_mbr(
cursor.page_cur.index = page_cursor->index;
cursor.page_cur.block = block;
offsets = rtr_page_get_father_block(
- NULL, heap, mtr, btr_cur, &cursor);
+ nullptr, heap, btr_cur, &cursor,
+ btr_cur->rtr_info->thr, mtr);
page = buf_block_get_frame(block);
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index 8ca8681bce9..43fcf5c82c8 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -34,7 +34,6 @@ Created 2014/01/16 Jimmy Yang
#include "btr0pcur.h"
#include "rem0cmp.h"
#include "lock0lock.h"
-#include "ibuf0ibuf.h"
#include "trx0trx.h"
#include "srv0mon.h"
#include "que0que.h"
@@ -114,8 +113,8 @@ rtr_latch_leaves(
left_page_no = btr_page_get_prev(block->page.frame);
if (left_page_no != FIL_NULL) {
- btr_block_get(*cursor->index(), left_page_no, RW_X_LATCH,
- true, mtr);
+ btr_block_get(*cursor->index(), left_page_no,
+ RW_X_LATCH, mtr);
}
mtr->upgrade_buffer_fix(block_savepoint, RW_X_LATCH);
@@ -124,7 +123,7 @@ rtr_latch_leaves(
if (right_page_no != FIL_NULL) {
btr_block_get(*cursor->index(), right_page_no,
- RW_X_LATCH, true, mtr);
+ RW_X_LATCH, mtr);
}
break;
case BTR_SEARCH_LEAF:
@@ -541,10 +540,10 @@ static void rtr_compare_cursor_rec(const rec_t *rec, dict_index_t *index,
#endif
TRANSACTIONAL_TARGET
-dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
- page_cur_mode_t mode,
- btr_latch_mode latch_mode,
- btr_cur_t *cur, mtr_t *mtr)
+dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr,
+ const dtuple_t *tuple,
+ btr_latch_mode latch_mode, mtr_t *mtr,
+ page_cur_mode_t mode, ulint level)
{
page_cur_mode_t page_mode;
page_cur_mode_t search_mode= PAGE_CUR_UNSUPP;
@@ -667,7 +666,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
dberr_t err;
auto block_savepoint= mtr->get_savepoint();
buf_block_t *block= buf_page_get_gen(page_id, zip_size, rw_latch, guess,
- buf_mode, mtr, &err, false);
+ buf_mode, mtr, &err);
if (!block)
{
if (err == DB_DECRYPTION_FAILED)
@@ -725,7 +724,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
index->set_ssn(page_get_ssn_id(page) + 1);
/* Save the MBR */
- cur->rtr_info->thr= cur->thr;
+ cur->rtr_info->thr= thr;
rtr_get_mbr_from_tuple(tuple, &cur->rtr_info->mbr);
#ifdef BTR_CUR_ADAPT
@@ -833,7 +832,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
lock_prdt_t prdt;
{
- trx_t* trx= thr_get_trx(cur->thr);
+ trx_t* trx= thr_get_trx(thr);
TMLockTrxGuard g{TMLockTrxArgs(*trx)};
lock_init_prdt_from_mbr(&prdt, &cur->rtr_info->mbr, mode,
trx->lock.lock_heap);
@@ -842,7 +841,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
if (rw_latch == RW_NO_LATCH && height != 0)
block->page.lock.s_lock();
- lock_prdt_lock(block, &prdt, index, LOCK_S, LOCK_PREDICATE, cur->thr);
+ lock_prdt_lock(block, &prdt, index, LOCK_S, LOCK_PREDICATE, thr);
if (rw_latch == RW_NO_LATCH && height != 0)
block->page.lock.s_unlock();
@@ -950,7 +949,7 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
if (upper_rw_latch == RW_NO_LATCH)
{
ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);
- btr_block_get(*index, page_id.page_no(), RW_X_LATCH, false, mtr, &err);
+ btr_block_get(*index, page_id.page_no(), RW_X_LATCH, mtr, &err);
}
else
{
@@ -979,19 +978,21 @@ dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
goto func_exit;
}
-dberr_t rtr_search_leaf(btr_cur_t *cur, const dtuple_t *tuple,
+dberr_t rtr_search_leaf(btr_cur_t *cur, que_thr_t *thr, const dtuple_t *tuple,
btr_latch_mode latch_mode,
mtr_t *mtr, page_cur_mode_t mode)
{
- return rtr_search_to_nth_level(0, tuple, mode, latch_mode, cur, mtr);
+ return rtr_search_to_nth_level(cur, thr, tuple, latch_mode, mtr, mode, 0);
}
/** Search for a spatial index leaf page record.
-@param pcur cursor
+@param pcur cursor
+@param thr query thread
@param tuple search tuple
@param mode search mode
@param mtr mini-transaction */
-dberr_t rtr_search_leaf(btr_pcur_t *pcur, const dtuple_t *tuple,
+dberr_t rtr_search_leaf(btr_pcur_t *pcur, que_thr_t *thr,
+ const dtuple_t *tuple,
page_cur_mode_t mode, mtr_t *mtr)
{
#ifdef UNIV_DEBUG
@@ -1010,7 +1011,8 @@ dberr_t rtr_search_leaf(btr_pcur_t *pcur, const dtuple_t *tuple,
pcur->search_mode= mode;
pcur->pos_state= BTR_PCUR_IS_POSITIONED;
pcur->trx_if_known= nullptr;
- return rtr_search_leaf(&pcur->btr_cur, tuple, BTR_SEARCH_LEAF, mtr, mode);
+ return rtr_search_leaf(&pcur->btr_cur, thr, tuple, BTR_SEARCH_LEAF, mtr,
+ mode);
}
/**************************************************************//**
@@ -1020,6 +1022,7 @@ bool rtr_search(
const dtuple_t* tuple, /*!< in: tuple on which search done */
btr_latch_mode latch_mode,/*!< in: BTR_MODIFY_LEAF, ... */
btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
+ que_thr_t* thr, /*!< in/out: query thread */
mtr_t* mtr) /*!< in: mtr */
{
static_assert(BTR_MODIFY_TREE == (8 | BTR_MODIFY_LEAF), "");
@@ -1048,15 +1051,16 @@ bool rtr_search(
btr_cur_t* btr_cursor = btr_pcur_get_btr_cur(cursor);
btr_cursor->rtr_info
- = rtr_create_rtr_info(false, false,
- btr_cursor, cursor->index());
+ = rtr_create_rtr_info(false, false, thr, btr_cursor);
- if (btr_cursor->thr) {
+ if (!thr) {
+ /* Purge will U-lock the tree instead of taking page locks */
+ } else {
btr_cursor->rtr_info->need_page_lock = true;
- btr_cursor->rtr_info->thr = btr_cursor->thr;
+ btr_cursor->rtr_info->thr = thr;
}
- if (rtr_search_leaf(btr_cursor, tuple, latch_mode, mtr)
+ if (rtr_search_leaf(btr_cursor, thr, tuple, latch_mode, mtr)
!= DB_SUCCESS) {
return true;
}
@@ -1103,12 +1107,14 @@ bool rtr_search(
about parent nodes in search
@param[out] cursor cursor on node pointer record,
its page x-latched
+@param[in,out] thr query thread
@return whether the cursor was successfully positioned */
-bool rtr_page_get_father(mtr_t *mtr, btr_cur_t *sea_cur, btr_cur_t *cursor)
+bool rtr_page_get_father(mtr_t *mtr, btr_cur_t *sea_cur, btr_cur_t *cursor,
+ que_thr_t *thr)
{
mem_heap_t *heap = mem_heap_create(100);
rec_offs *offsets= rtr_page_get_father_block(nullptr, heap,
- mtr, sea_cur, cursor);
+ sea_cur, cursor, thr, mtr);
mem_heap_free(heap);
return offsets != nullptr;
}
@@ -1125,12 +1131,13 @@ static const rec_t* rtr_get_father_node(
btr_cur_t* sea_cur,/*!< in: search cursor */
btr_cur_t* btr_cur,/*!< in/out: tree cursor; the cursor page is
s- or x-latched, but see also above! */
+ que_thr_t* thr, /*!< in/out: query thread */
ulint page_no,/*!< Current page no */
mtr_t* mtr) /*!< in: mtr */
{
const rec_t* rec = nullptr;
auto had_rtr = btr_cur->rtr_info;
- dict_index_t* const index = btr_cur->index();
+ ut_d(dict_index_t* const index = btr_cur->index());
/* Try to optimally locate the parent node. Level should always
less than sea_cur->tree_height unless the root is splitting */
@@ -1161,10 +1168,10 @@ static const rec_t* rtr_get_father_node(
rtr_clean_rtr_info(btr_cur->rtr_info, true);
}
- btr_cur->rtr_info = rtr_create_rtr_info(false, false, btr_cur, index);
+ btr_cur->rtr_info = rtr_create_rtr_info(false, false, thr, btr_cur);
- if (rtr_search_to_nth_level(level, tuple, PAGE_CUR_RTREE_LOCATE,
- BTR_CONT_MODIFY_TREE, btr_cur, mtr)
+ if (rtr_search_to_nth_level(btr_cur, thr, tuple, BTR_CONT_MODIFY_TREE,
+ mtr, PAGE_CUR_RTREE_LOCATE, level)
!= DB_SUCCESS) {
} else if (sea_cur && sea_cur->tree_height == level) {
rec = btr_cur_get_rec(btr_cur);
@@ -1212,6 +1219,7 @@ rtr_page_get_father_node_ptr(
btr_cur_t* cursor, /*!< in: cursor pointing to user record,
out: cursor on node pointer record,
its page x-latched */
+ que_thr_t* thr, /*!< in/out: query thread */
mtr_t* mtr) /*!< in: mtr */
{
dtuple_t* tuple;
@@ -1247,7 +1255,7 @@ rtr_page_get_father_node_ptr(
const rec_t* node_ptr = rtr_get_father_node(level + 1, tuple,
sea_cur, cursor,
- page_no, mtr);
+ thr, page_no, mtr);
if (!node_ptr) {
return nullptr;
}
@@ -1273,18 +1281,20 @@ rtr_page_get_father_block(
/*======================*/
rec_offs* offsets,/*!< in: work area for the return value */
mem_heap_t* heap, /*!< in: memory heap to use */
- mtr_t* mtr, /*!< in: mtr */
btr_cur_t* sea_cur,/*!< in: search cursor, contains information
about parent nodes in search */
- btr_cur_t* cursor) /*!< out: cursor on node pointer record,
+ btr_cur_t* cursor, /*!< out: cursor on node pointer record,
its page x-latched */
+ que_thr_t* thr, /*!< in/out: query thread */
+ mtr_t* mtr) /*!< in/out: mtr */
{
rec_t *rec=
page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame));
if (!rec)
return nullptr;
cursor->page_cur.rec= rec;
- return rtr_page_get_father_node_ptr(offsets, heap, sea_cur, cursor, mtr);
+ return rtr_page_get_father_node_ptr(offsets, heap, sea_cur, cursor,
+ thr, mtr);
}
/*******************************************************************//**
@@ -1297,12 +1307,12 @@ rtr_create_rtr_info(
bool init_matches, /*!< in: Whether to initiate the
"matches" structure for collecting
matched leaf records */
- btr_cur_t* cursor, /*!< in: tree search cursor */
- dict_index_t* index) /*!< in: index struct */
+ que_thr_t* thr, /*!< in/out: query thread */
+ btr_cur_t* cursor) /*!< in: tree search cursor */
{
rtr_info_t* rtr_info;
- index = index ? index : cursor->index();
+ dict_index_t* index = cursor->index();
ut_ad(index);
rtr_info = static_cast<rtr_info_t*>(ut_zalloc_nokey(sizeof(*rtr_info)));
@@ -1310,6 +1320,7 @@ rtr_create_rtr_info(
rtr_info->allocated = true;
rtr_info->cursor = cursor;
rtr_info->index = index;
+ rtr_info->thr = thr;
if (init_matches) {
rtr_info->heap = mem_heap_create(sizeof(*(rtr_info->matches)));
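The gis0sea.cc changes stop caching the query thread on the cursor and instead thread que_thr_t* explicitly through every R-tree search entry point; purge passes nullptr and relies on the index lock rather than predicate locks. A minimal sketch of that explicit context-passing style, with stand-in types rather than the InnoDB ones:

#include <iostream>

// Stand-in for a query thread / execution context. Purge passes nullptr.
struct QueryThread { int trx_id; };

struct Cursor { int positioned_at = -1; };

// The search takes the context as an explicit parameter instead of reading a
// cursor field that some callers might forget to set.
void search_leaf(Cursor& cur, const QueryThread* thr, int key)
{
  if (thr)
    std::cout << "taking predicate locks for trx " << thr->trx_id << "\n";
  else
    std::cout << "no query thread (purge): relying on index lock instead\n";
  cur.positioned_at = key;   // pretend we descended to the leaf holding `key`
}

int main()
{
  Cursor cur;
  QueryThread thr{7};
  search_leaf(cur, &thr, 42);     // normal DML path
  search_leaf(cur, nullptr, 42);  // purge path
}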
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 0825d527dfc..d18f85f4ac5 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -87,7 +87,6 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "fts0plugin.h"
#include "fts0priv.h"
#include "fts0types.h"
-#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "log0crypt.h"
#include "mtr0mtr.h"
@@ -366,6 +365,8 @@ const char* innodb_flush_method_names[] = {
NullS
};
+static constexpr ulong innodb_flush_method_default = IF_WIN(6,4);
+
/** Enumeration of innodb_flush_method */
TYPELIB innodb_flush_method_typelib = {
array_elements(innodb_flush_method_names) - 1,
@@ -374,6 +375,9 @@ TYPELIB innodb_flush_method_typelib = {
NULL
};
+/** Deprecated parameter */
+static ulong innodb_flush_method;
+
/** Names of allowed values of innodb_deadlock_report */
static const char *innodb_deadlock_report_names[]= {
"off", /* Do not report any details of deadlocks */
@@ -394,25 +398,6 @@ static TYPELIB innodb_deadlock_report_typelib = {
NULL
};
-/** Allowed values of innodb_change_buffering */
-static const char* innodb_change_buffering_names[] = {
- "none", /* IBUF_USE_NONE */
- "inserts", /* IBUF_USE_INSERT */
- "deletes", /* IBUF_USE_DELETE_MARK */
- "changes", /* IBUF_USE_INSERT_DELETE_MARK */
- "purges", /* IBUF_USE_DELETE */
- "all", /* IBUF_USE_ALL */
- NullS
-};
-
-/** Enumeration of innodb_change_buffering */
-static TYPELIB innodb_change_buffering_typelib = {
- array_elements(innodb_change_buffering_names) - 1,
- "innodb_change_buffering_typelib",
- innodb_change_buffering_names,
- NULL
-};
-
/** Allowed values of innodb_instant_alter_column_allowed */
const char* innodb_instant_alter_column_allowed_names[] = {
"never", /* compatible with MariaDB 5.5 to 10.2 */
@@ -526,9 +511,6 @@ mysql_pfs_key_t fts_cache_mutex_key;
mysql_pfs_key_t fts_cache_init_mutex_key;
mysql_pfs_key_t fts_delete_mutex_key;
mysql_pfs_key_t fts_doc_id_mutex_key;
-mysql_pfs_key_t ibuf_bitmap_mutex_key;
-mysql_pfs_key_t ibuf_mutex_key;
-mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key;
mysql_pfs_key_t recalc_pool_mutex_key;
mysql_pfs_key_t purge_sys_pq_mutex_key;
mysql_pfs_key_t recv_sys_mutex_key;
@@ -560,8 +542,6 @@ static PSI_mutex_info all_innodb_mutexes[] = {
PSI_KEY(fts_cache_init_mutex),
PSI_KEY(fts_delete_mutex),
PSI_KEY(fts_doc_id_mutex),
- PSI_KEY(ibuf_mutex),
- PSI_KEY(ibuf_pessimistic_insert_mutex),
PSI_KEY(index_online_log),
PSI_KEY(page_zip_stat_per_index_mutex),
PSI_KEY(purge_sys_pq_mutex),
@@ -962,20 +942,6 @@ static SHOW_VAR innodb_status_variables[]= {
{"dblwr_writes", &export_vars.innodb_dblwr_writes, SHOW_SIZE_T},
{"deadlocks", &lock_sys.deadlocks, SHOW_SIZE_T},
{"history_list_length", &export_vars.innodb_history_list_length,SHOW_SIZE_T},
- {"ibuf_discarded_delete_marks", &ibuf.n_discarded_ops[IBUF_OP_DELETE_MARK],
- SHOW_SIZE_T},
- {"ibuf_discarded_deletes", &ibuf.n_discarded_ops[IBUF_OP_DELETE],
- SHOW_SIZE_T},
- {"ibuf_discarded_inserts", &ibuf.n_discarded_ops[IBUF_OP_INSERT],
- SHOW_SIZE_T},
- {"ibuf_free_list", &ibuf.free_list_len, SHOW_SIZE_T},
- {"ibuf_merged_delete_marks", &ibuf.n_merged_ops[IBUF_OP_DELETE_MARK],
- SHOW_SIZE_T},
- {"ibuf_merged_deletes", &ibuf.n_merged_ops[IBUF_OP_DELETE], SHOW_SIZE_T},
- {"ibuf_merged_inserts", &ibuf.n_merged_ops[IBUF_OP_INSERT], SHOW_SIZE_T},
- {"ibuf_merges", &ibuf.n_merges, SHOW_SIZE_T},
- {"ibuf_segment_size", &ibuf.seg_size, SHOW_SIZE_T},
- {"ibuf_size", &ibuf.size, SHOW_SIZE_T},
{"log_waits", &log_sys.waits, SHOW_SIZE_T},
{"log_write_requests", &log_sys.write_to_buf, SHOW_SIZE_T},
{"log_writes", &log_sys.write_to_log, SHOW_SIZE_T},
@@ -3913,8 +3879,6 @@ static int innodb_init_params()
DBUG_RETURN(HA_ERR_INITIALIZATION);
}
- DBUG_ASSERT(innodb_change_buffering <= IBUF_USE_ALL);
-
/* Check that interdependent parameters have sane values. */
if (srv_max_buf_pool_modified_pct < srv_max_dirty_pages_pct_lwm) {
sql_print_warning("InnoDB: innodb_max_dirty_pages_pct_lwm"
@@ -3991,27 +3955,27 @@ static int innodb_init_params()
fts_sort_pll_degree = num_pll_degree;
- /* Store the default charset-collation number of this MySQL
- installation */
-
- data_mysql_default_charset_coll = (ulint) default_charset_info->number;
-
+ if (innodb_flush_method == 1 /* O_DSYNC */) {
+ log_sys.log_write_through = true;
+ fil_system.write_through = true;
+ fil_system.buffered = false;
+#if defined __linux__ || defined _WIN32
+ log_sys.log_buffered = false;
+ goto skip_buffering_tweak;
+#endif
+ } else if (innodb_flush_method >= 4 /* O_DIRECT */
+ IF_WIN(&& innodb_flush_method < 8 /* normal */,)) {
+ /* O_DIRECT and similar settings do nothing */
#ifndef _WIN32
- if (srv_use_atomic_writes && my_may_have_atomic_write) {
- /*
- Force O_DIRECT on Unixes (on Windows writes are always
- unbuffered)
- */
- switch (srv_file_flush_method) {
- case SRV_O_DIRECT:
- case SRV_O_DIRECT_NO_FSYNC:
- break;
- default:
- srv_file_flush_method = SRV_O_DIRECT;
- fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n");
- }
- }
+ } else if (srv_use_atomic_writes && my_may_have_atomic_write) {
+ /* If atomic writes are enabled, do the same as with
+ innodb_flush_method=O_DIRECT: retain the default settings */
#endif
+ } else {
+ log_sys.log_write_through = false;
+ fil_system.write_through = false;
+ fil_system.buffered = true;
+ }
#if defined __linux__ || defined _WIN32
if (srv_flush_log_at_trx_commit == 2) {
@@ -4019,6 +3983,7 @@ static int innodb_init_params()
innodb_flush_log_at_trx_commit=2. */
log_sys.log_buffered = true;
}
+skip_buffering_tweak:
#endif
if (srv_read_only_mode) {
@@ -4026,12 +3991,6 @@ static int innodb_init_params()
srv_use_doublewrite_buf = FALSE;
}
-#if !defined LINUX_NATIVE_AIO && !defined HAVE_URING && !defined _WIN32
- /* Currently native AIO is supported only on windows and linux
- and that also when the support is compiled in. In all other
- cases, we ignore the setting of innodb_use_native_aio. */
- srv_use_native_aio = FALSE;
-#endif
#ifdef HAVE_URING
if (srv_use_native_aio && io_uring_may_be_unsafe) {
sql_print_warning("innodb_use_native_aio may cause "
@@ -4039,28 +3998,39 @@ static int innodb_init_params()
"https://jira.mariadb.org/browse/MDEV-26674",
io_uring_may_be_unsafe);
}
+#elif !defined LINUX_NATIVE_AIO && !defined _WIN32
+ /* Currently native AIO is supported only on windows and linux
+ and that also when the support is compiled in. In all other
+ cases, we ignore the setting of innodb_use_native_aio. */
+ srv_use_native_aio = FALSE;
#endif
-#ifndef _WIN32
- ut_ad(srv_file_flush_method <= SRV_O_DIRECT_NO_FSYNC);
-#else
- switch (srv_file_flush_method) {
- case SRV_ALL_O_DIRECT_FSYNC + 1 /* "async_unbuffered"="unbuffered" */:
- srv_file_flush_method = SRV_ALL_O_DIRECT_FSYNC;
- break;
- case SRV_ALL_O_DIRECT_FSYNC + 2 /* "normal"="fsync" */:
- srv_file_flush_method = SRV_FSYNC;
- break;
- default:
- ut_ad(srv_file_flush_method <= SRV_ALL_O_DIRECT_FSYNC);
- }
-#endif
innodb_buffer_pool_size_init();
srv_lock_table_size = 5 * (srv_buf_pool_size >> srv_page_size_shift);
DBUG_RETURN(0);
}
+
+/*********************************************************************//**
+Set up cost factors for InnoDB so that it can approximate how many
+milliseconds different operations take. See the cost functions in handler.h
+for how the different variables are used */
+
+static void innobase_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ /*
+ The following numbers were found by check_costs.pl when using 1M rows
+ and all rows are cached. See optimizer_costs.txt for details
+ */
+ costs->row_next_find_cost= 0.00007013;
+ costs->row_lookup_cost= 0.00076597;
+ costs->key_next_find_cost= 0.00009900;
+ costs->key_lookup_cost= 0.00079112;
+ costs->row_copy_cost= 0.00006087;
+}
+
+
/** Initialize the InnoDB storage engine plugin.
@param[in,out] p InnoDB handlerton
@return error code
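innobase_update_optimizer_costs() above hands engine-specific per-row and per-key costs (measured with check_costs.pl; see Docs/optimizer_costs.txt) to the optimizer. A rough standalone illustration of how such constants could combine into plan cost estimates; the struct and formulas are simplified stand-ins, only the numeric values are taken from the patch:

#include <cstdio>

// Simplified cost factors, in milliseconds per operation, mirroring the shape of
// the constants set above.
struct EngineCosts {
  double row_next_find_cost = 0.00007013;  // advance to the next row in a scan
  double row_lookup_cost    = 0.00076597;  // fetch a row by clustered key
  double key_next_find_cost = 0.00009900;  // advance to the next key entry
  double key_lookup_cost    = 0.00079112;  // dive into a secondary index
  double row_copy_cost      = 0.00006087;  // copy a row to the server format
};

// Toy estimate: cost of fetching `rows` rows via one index dive per row.
static double index_lookup_plan_cost(const EngineCosts& c, double rows)
{
  return rows * (c.key_lookup_cost + c.row_lookup_cost + c.row_copy_cost);
}

// Toy estimate: cost of scanning `rows` rows sequentially.
static double table_scan_plan_cost(const EngineCosts& c, double rows)
{
  return rows * (c.row_next_find_cost + c.row_copy_cost);
}

int main()
{
  EngineCosts c;
  double rows = 1e6;
  std::printf("scan: %.1f ms, per-row lookups: %.1f ms\n",
              table_scan_plan_cost(c, rows), index_lookup_plan_cost(c, rows));
}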
@@ -4128,6 +4098,8 @@ static int innodb_init(void* p)
innobase_hton->prepare_commit_versioned
= innodb_prepare_commit_versioned;
+ innobase_hton->update_optimizer_costs= innobase_update_optimizer_costs;
+
innodb_remember_check_sysvar_funcs();
compile_time_assert(DATA_MYSQL_TRUE_VARCHAR == MYSQL_TYPE_VARCHAR);
@@ -4208,8 +4180,6 @@ static int innodb_init(void* p)
innobase_old_blocks_pct = buf_LRU_old_ratio_update(
innobase_old_blocks_pct, true);
- ibuf_max_size_update(srv_change_buffer_max_size);
-
mysql_mutex_init(pending_checkpoint_mutex_key,
&log_requests.mutex,
MY_MUTEX_INIT_FAST);
@@ -4340,7 +4310,7 @@ innobase_start_trx_and_assign_read_view(
Do this only if transaction is using REPEATABLE READ isolation
level. */
trx->isolation_level = innobase_map_isolation_level(
- thd_get_trx_isolation(thd));
+ thd_get_trx_isolation(thd)) & 3;
if (trx->isolation_level == TRX_ISO_REPEATABLE_READ) {
trx->read_view.open(trx);
@@ -5077,13 +5047,11 @@ ha_innobase::index_flags(
}
ulong flags= key == table_share->primary_key
- ? HA_CLUSTERED_INDEX : 0;
+ ? HA_CLUSTERED_INDEX : HA_KEYREAD_ONLY | HA_DO_RANGE_FILTER_PUSHDOWN;
flags |= HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER
- | HA_READ_RANGE | HA_KEYREAD_ONLY
- | HA_DO_INDEX_COND_PUSHDOWN
- | HA_DO_RANGE_FILTER_PUSHDOWN;
-
+ | HA_READ_RANGE
+ | HA_DO_INDEX_COND_PUSHDOWN;
return(flags);
}
@@ -6604,8 +6572,7 @@ uint8_t
get_innobase_type_from_mysql_type(unsigned *unsigned_flag, const Field *field)
{
/* The following asserts try to check that the MySQL type code fits in
- 8 bits: this is used in ibuf and also when DATA_NOT_NULL is ORed to
- the type */
+ 8 bits: this is used when DATA_NOT_NULL is ORed to the type */
static_assert(MYSQL_TYPE_STRING < 256, "compatibility");
static_assert(MYSQL_TYPE_VAR_STRING < 256, "compatibility");
@@ -9506,6 +9473,11 @@ ha_innobase::ft_init()
trx->will_lock = true;
}
+ /* If there is an FTS scan in progress, stop it */
+ fts_result_t* result = (reinterpret_cast<NEW_FT_INFO*>(ft_handler))->ft_result;
+ if (result)
+ result->current= NULL;
+
DBUG_RETURN(rnd_init(false));
}
@@ -14348,13 +14320,15 @@ ha_innobase::estimate_rows_upper_bound()
DBUG_RETURN((ha_rows) estimate);
}
+
/*********************************************************************//**
How many seeks it will take to read through the table. This is to be
comparable to the number returned by records_in_range so that we can
decide if we should scan the table or use keys.
@return estimated time measured in disk seeks */
-double
+#ifdef NOT_USED
+IO_AND_CPU_COST
ha_innobase::scan_time()
/*====================*/
{
@@ -14374,24 +14348,28 @@ ha_innobase::scan_time()
TODO: This will be further improved to return some approximate
estimate but that would also needs pre-population of stats
structure. As of now approach is in sync with MyISAM. */
- return(ulonglong2double(stats.data_file_length) / IO_SIZE + 2);
+ return { (ulonglong2double(stats.data_file_length) / IO_SIZE * DISK_READ_COST), 0.0 };
}
ulint stat_clustered_index_size;
-
+ IO_AND_CPU_COST cost;
ut_a(m_prebuilt->table->stat_initialized);
stat_clustered_index_size =
m_prebuilt->table->stat_clustered_index_size;
- return((double) stat_clustered_index_size);
+ cost.io= (double) stat_clustered_index_size * DISK_READ_COST;
+ cost.cpu= 0;
+ return(cost);
}
+#endif
/******************************************************************//**
Calculate the time it takes to read a set of ranges through an index
This enables us to optimise reads for clustered indexes.
@return estimated time measured in disk seeks */
+#ifdef NOT_USED
double
ha_innobase::read_time(
/*===================*/
@@ -14416,8 +14394,33 @@ ha_innobase::read_time(
return(time_for_scan);
}
- return(ranges + (double) rows / (double) total_rows * time_for_scan);
+ return(ranges * KEY_LOOKUP_COST + (double) rows / (double) total_rows * time_for_scan);
+}
+
+/******************************************************************//**
+Calculate the time it takes to read a set of rows with primary key.
+*/
+
+IO_AND_CPU_COST
+ha_innobase::rnd_pos_time(ha_rows rows)
+{
+ ha_rows total_rows;
+
+ /* Assume that the read time is proportional to the scan time for all
+ rows + at most one seek per range. */
+
+ IO_AND_CPU_COST time_for_scan = scan_time();
+
+ if ((total_rows = estimate_rows_upper_bound()) < rows) {
+
+ return(time_for_scan);
+ }
+ double frac= (double) rows + (double) rows / (double) total_rows;
+ time_for_scan.io*= frac;
+ time_for_scan.cpu*= frac;
+ return(time_for_scan);
}
+#endif
/*********************************************************************//**
Calculates the key number used inside MySQL for an Innobase index.
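The scan_time()/rnd_pos_time() variants above (kept under #ifdef NOT_USED) return a cost split into I/O and CPU components instead of a single "disk seeks" figure, and rnd_pos_time() scales both parts by the requested fraction of the table. A small sketch of that representation, with a stand-in struct rather than the server's definition:

#include <iostream>

// A cost split into an I/O part and a CPU part, so the two can be weighted
// differently depending on how much of the table is cached.
struct IoAndCpuCost {
  double io  = 0;
  double cpu = 0;
  IoAndCpuCost scaled(double f) const { return {io * f, cpu * f}; }
};

// Full-table scan cost: proportional to on-disk size; CPU cost omitted here.
IoAndCpuCost scan_cost(double data_file_length, double io_size, double disk_read_cost)
{
  return {data_file_length / io_size * disk_read_cost, 0.0};
}

int main()
{
  IoAndCpuCost full = scan_cost(64.0 * 1024 * 1024, 4096, 1.0);
  IoAndCpuCost tenth = full.scaled(0.1);   // reading ~10% of the rows by position
  std::cout << "full scan io=" << full.io << ", partial io=" << tenth.io << "\n";
}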
@@ -14896,13 +14899,6 @@ ha_innobase::info_low(
innodb_rec_per_key(index, j,
stats.records));
- /* Since MySQL seems to favor table scans
- too much over index searches, we pretend
- index selectivity is 2 times better than
- our estimate: */
-
- rec_per_key_int = rec_per_key_int / 2;
-
if (rec_per_key_int == 0) {
rec_per_key_int = 1;
}
@@ -15290,7 +15286,7 @@ ha_innobase::check(
}
/* Restore the original isolation level */
- m_prebuilt->trx->isolation_level = old_isolation_level;
+ m_prebuilt->trx->isolation_level = old_isolation_level & 3;
#ifdef BTR_CUR_HASH_ADAPT
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/* We validate the whole adaptive hash index for all tables
@@ -16324,7 +16320,7 @@ ha_innobase::store_lock(
if (lock_type != TL_IGNORE
&& trx->n_mysql_tables_in_use == 0) {
trx->isolation_level = innobase_map_isolation_level(
- (enum_tx_isolation) thd_tx_isolation(thd));
+ (enum_tx_isolation) thd_tx_isolation(thd)) & 3;
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) {
@@ -17462,20 +17458,6 @@ innodb_old_blocks_pct_update(THD*, st_mysql_sys_var*, void*, const void* save)
innobase_old_blocks_pct = ratio;
}
-/****************************************************************//**
-Update the system variable innodb_old_blocks_pct using the "saved"
-value. This function is registered as a callback with MySQL. */
-static
-void
-innodb_change_buffer_max_size_update(THD*, st_mysql_sys_var*, void*,
- const void* save)
-{
- srv_change_buffer_max_size = *static_cast<const uint*>(save);
- mysql_mutex_unlock(&LOCK_global_system_variables);
- ibuf_max_size_update(srv_change_buffer_max_size);
- mysql_mutex_lock(&LOCK_global_system_variables);
-}
-
#ifdef UNIV_DEBUG
static uint srv_fil_make_page_dirty_debug = 0;
static uint srv_saved_page_number_debug;
@@ -18429,7 +18411,7 @@ buffer_pool_load_abort(
}
#if defined __linux__ || defined _WIN32
-static void innodb_log_file_buffering_update(THD *thd, st_mysql_sys_var*,
+static void innodb_log_file_buffering_update(THD *, st_mysql_sys_var*,
void *, const void *save)
{
mysql_mutex_unlock(&LOCK_global_system_variables);
@@ -18438,6 +18420,30 @@ static void innodb_log_file_buffering_update(THD *thd, st_mysql_sys_var*,
}
#endif
+static void innodb_log_file_write_through_update(THD *, st_mysql_sys_var*,
+ void *, const void *save)
+{
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ log_sys.set_write_through(*static_cast<const my_bool*>(save));
+ mysql_mutex_lock(&LOCK_global_system_variables);
+}
+
+static void innodb_data_file_buffering_update(THD *, st_mysql_sys_var*,
+ void *, const void *save)
+{
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ fil_system.set_buffered(*static_cast<const my_bool*>(save));
+ mysql_mutex_lock(&LOCK_global_system_variables);
+}
+
+static void innodb_data_file_write_through_update(THD *, st_mysql_sys_var*,
+ void *, const void *save)
+{
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ fil_system.set_write_through(*static_cast<const my_bool*>(save));
+ mysql_mutex_lock(&LOCK_global_system_variables);
+}
+
static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*,
void *var, const void *save)
{
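The three new update callbacks all release LOCK_global_system_variables before calling into log_sys or fil_system, since toggling write-through may reopen every data file and should not block sessions that only read system variables. A generic sketch of that unlock-around-slow-work pattern, using plain std::mutex stand-ins rather than the server's locks:

#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex global_sysvar_mutex;   // stand-in for LOCK_global_system_variables
std::mutex subsystem_mutex;       // stand-in for fil_system.mutex

bool write_through = false;

// Slow subsystem call that takes its own mutex and may block for a while.
void apply_write_through(bool value)
{
  std::lock_guard<std::mutex> lock(subsystem_mutex);
  write_through = value;
  std::this_thread::sleep_for(std::chrono::milliseconds(5));  // e.g. reopening files
}

// Update callback: entered with the global mutex held; release it around the slow
// work so other sessions can keep reading system variables meanwhile.
void update_callback(bool new_value)
{
  global_sysvar_mutex.unlock();
  apply_write_through(new_value);
  global_sysvar_mutex.lock();
}

int main()
{
  global_sysvar_mutex.lock();       // the caller holds the mutex, as the server would
  update_callback(true);
  global_sysvar_mutex.unlock();
  std::cout << "write_through=" << write_through << "\n";
}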
@@ -18874,7 +18880,7 @@ static MYSQL_SYSVAR_UINT(fast_shutdown, srv_fast_shutdown,
fast_shutdown_validate, NULL, 1, 0, 3, 0);
static MYSQL_SYSVAR_BOOL(file_per_table, srv_file_per_table,
- PLUGIN_VAR_NOCMDARG,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_DEPRECATED,
"Stores each InnoDB table to an .ibd file in the database dir.",
NULL, NULL, TRUE);
@@ -18904,11 +18910,10 @@ static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit,
" guarantees in case of crash. 0 and 2 can be faster than 1 or 3.",
NULL, NULL, 1, 0, 3, 0);
-static MYSQL_SYSVAR_ENUM(flush_method, srv_file_flush_method,
- PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+static MYSQL_SYSVAR_ENUM(flush_method, innodb_flush_method,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_DEPRECATED,
"With which method to flush data.",
- NULL, NULL, IF_WIN(SRV_ALL_O_DIRECT_FSYNC, SRV_O_DIRECT),
- &innodb_flush_method_typelib);
+ NULL, NULL, innodb_flush_method_default, &innodb_flush_method_typelib);
static MYSQL_SYSVAR_STR(log_group_home_dir, srv_log_group_home_dir,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
@@ -19105,7 +19110,7 @@ static MYSQL_SYSVAR_BOOL(buffer_pool_load_at_startup, srv_buffer_pool_load_at_st
NULL, NULL, TRUE);
static MYSQL_SYSVAR_BOOL(defragment, srv_defragment,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"Enable/disable InnoDB defragmentation (default FALSE). When set to FALSE, all existing "
"defragmentation will be paused. And new defragmentation command will fail."
"Paused defragmentation commands will resume when this variable is set to "
@@ -19113,14 +19118,14 @@ static MYSQL_SYSVAR_BOOL(defragment, srv_defragment,
NULL, NULL, FALSE);
static MYSQL_SYSVAR_UINT(defragment_n_pages, srv_defragment_n_pages,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"Number of pages considered at once when merging multiple pages to "
"defragment",
NULL, NULL, 7, 2, 32, 0);
static MYSQL_SYSVAR_UINT(defragment_stats_accuracy,
srv_defragment_stats_accuracy,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"How many defragment stats changes there are before the stats "
"are written to persistent storage. Set to 0 meaning disable "
"defragment stats tracking.",
@@ -19128,7 +19133,7 @@ static MYSQL_SYSVAR_UINT(defragment_stats_accuracy,
static MYSQL_SYSVAR_UINT(defragment_fill_factor_n_recs,
srv_defragment_fill_factor_n_recs,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"How many records of space defragmentation should leave on the page. "
"This variable, together with innodb_defragment_fill_factor, is introduced "
"so defragmentation won't pack the page too full and cause page split on "
@@ -19137,7 +19142,7 @@ static MYSQL_SYSVAR_UINT(defragment_fill_factor_n_recs,
NULL, NULL, 20, 1, 100, 0);
static MYSQL_SYSVAR_DOUBLE(defragment_fill_factor, srv_defragment_fill_factor,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"A number between [0.7, 1] that tells defragmentation how full it should "
"fill a page. Default is 0.9. Number below 0.7 won't make much sense."
"This variable, together with innodb_defragment_fill_factor_n_recs, is "
@@ -19147,7 +19152,7 @@ static MYSQL_SYSVAR_DOUBLE(defragment_fill_factor, srv_defragment_fill_factor,
NULL, NULL, 0.9, 0.7, 1, 0);
static MYSQL_SYSVAR_UINT(defragment_frequency, srv_defragment_frequency,
- PLUGIN_VAR_RQCMDARG,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
"Do not defragment a single index more than this number of time per second."
"This controls the number of time defragmentation thread can request X_LOCK "
"on an index. Defragmentation thread will check whether "
@@ -19340,6 +19345,21 @@ static MYSQL_SYSVAR_BOOL(log_file_buffering, log_sys.log_buffered,
nullptr, innodb_log_file_buffering_update, FALSE);
#endif
+static MYSQL_SYSVAR_BOOL(log_file_write_through, log_sys.log_write_through,
+ PLUGIN_VAR_OPCMDARG,
+ "Whether each write to ib_logfile0 is write through",
+ nullptr, innodb_log_file_write_through_update, FALSE);
+
+static MYSQL_SYSVAR_BOOL(data_file_buffering, fil_system.buffered,
+ PLUGIN_VAR_OPCMDARG,
+ "Whether the file system cache for data files is enabled",
+ nullptr, innodb_data_file_buffering_update, FALSE);
+
+static MYSQL_SYSVAR_BOOL(data_file_write_through, fil_system.write_through,
+ PLUGIN_VAR_OPCMDARG,
+ "Whether each write to data files writes through",
+ nullptr, innodb_data_file_write_through_update, FALSE);
+
static MYSQL_SYSVAR_ULONGLONG(log_file_size, srv_log_file_size,
PLUGIN_VAR_RQCMDARG,
"Redo log size in bytes.",
@@ -19400,7 +19420,7 @@ static MYSQL_SYSVAR_UINT(undo_tablespaces, srv_undo_tablespaces,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Number of undo tablespaces to use.",
NULL, NULL,
- 0L, /* Default seting */
+ 3L, /* Default setting */
0L, /* Minimum value */
TRX_SYS_MAX_UNDO_SPACES, 0); /* Maximum value */
@@ -19473,31 +19493,6 @@ static MYSQL_SYSVAR_BOOL(numa_interleave, srv_numa_interleave,
NULL, NULL, FALSE);
#endif /* HAVE_LIBNUMA */
-static void innodb_change_buffering_update(THD *thd, struct st_mysql_sys_var*,
- void*, const void *save)
-{
- ulong i= *static_cast<const ulong*>(save);
- if (i != IBUF_USE_NONE && !ibuf.index)
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_NOT_KEYFILE,
- "InnoDB: The change buffer is corrupted.");
- else
- innodb_change_buffering= i;
-}
-
-static MYSQL_SYSVAR_ENUM(change_buffering, innodb_change_buffering,
- PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_DEPRECATED,
- "Buffer changes to secondary indexes.",
- nullptr, innodb_change_buffering_update,
- IBUF_USE_NONE, &innodb_change_buffering_typelib);
-
-static MYSQL_SYSVAR_UINT(change_buffer_max_size,
- srv_change_buffer_max_size,
- PLUGIN_VAR_RQCMDARG,
- "Maximum on-disk size of change buffer in terms of percentage"
- " of the buffer pool.",
- NULL, innodb_change_buffer_max_size_update,
- CHANGE_BUFFER_DEFAULT_SIZE, 0, 50, 0);
-
static MYSQL_SYSVAR_ENUM(stats_method, srv_innodb_stats_method,
PLUGIN_VAR_RQCMDARG,
"Specifies how InnoDB index statistics collection code should"
@@ -19505,18 +19500,6 @@ static MYSQL_SYSVAR_ENUM(stats_method, srv_innodb_stats_method,
" NULLS_UNEQUAL and NULLS_IGNORED",
NULL, NULL, SRV_STATS_NULLS_EQUAL, &innodb_stats_method_typelib);
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
-static MYSQL_SYSVAR_BOOL(change_buffer_dump, ibuf_dump,
- PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
- "Dump the change buffer at startup.",
- NULL, NULL, FALSE);
-
-static MYSQL_SYSVAR_UINT(change_buffering_debug, ibuf_debug,
- PLUGIN_VAR_RQCMDARG,
- "Debug flags for InnoDB change buffering (0=none, 1=try to buffer)",
- NULL, NULL, 0, 0, 1, 0);
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-
static MYSQL_SYSVAR_ULONG(buf_dump_status_frequency, srv_buf_dump_status_frequency,
PLUGIN_VAR_RQCMDARG,
"A number between [0, 100] that tells how oftern buffer pool dump status "
@@ -19796,6 +19779,9 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
#if defined __linux__ || defined _WIN32
MYSQL_SYSVAR(log_file_buffering),
#endif
+ MYSQL_SYSVAR(log_file_write_through),
+ MYSQL_SYSVAR(data_file_buffering),
+ MYSQL_SYSVAR(data_file_write_through),
MYSQL_SYSVAR(log_file_size),
MYSQL_SYSVAR(log_group_home_dir),
MYSQL_SYSVAR(max_dirty_pages_pct),
@@ -19843,12 +19829,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
#ifdef HAVE_LIBNUMA
MYSQL_SYSVAR(numa_interleave),
#endif /* HAVE_LIBNUMA */
- MYSQL_SYSVAR(change_buffering),
- MYSQL_SYSVAR(change_buffer_max_size),
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
- MYSQL_SYSVAR(change_buffer_dump),
- MYSQL_SYSVAR(change_buffering_debug),
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
MYSQL_SYSVAR(random_read_ahead),
MYSQL_SYSVAR(read_ahead_threshold),
MYSQL_SYSVAR(read_only),
@@ -20004,6 +19984,7 @@ ha_innobase::multi_range_read_info_const(
uint n_ranges,
uint* bufsz,
uint* flags,
+ ha_rows limit,
Cost_estimate* cost)
{
/* See comments in ha_myisam::multi_range_read_info_const */
@@ -20013,8 +19994,9 @@ ha_innobase::multi_range_read_info_const(
*flags |= HA_MRR_USE_DEFAULT_IMPL;
}
- ha_rows res= m_ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges,
- bufsz, flags, cost);
+ ha_rows res= m_ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param,
+ n_ranges,
+ bufsz, flags, limit, cost);
return res;
}
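
The three booleans added above, innodb_log_file_write_through, innodb_data_file_buffering and innodb_data_file_write_through, expose file-system caching and write-through behaviour as settable options. As a rough, self-contained illustration of the semantics their descriptions suggest (not the code path InnoDB itself uses), a (buffered, write_through) pair could be mapped to POSIX open(2) flags as below; the helper name is made up:

#include <cstdio>
#include <fcntl.h>

/* Illustration only: combine a "buffered" and a "write_through" flag into
   POSIX open(2) flags.  Hypothetical helper, not the InnoDB I/O layer. */
static int data_file_open_flags(bool buffered, bool write_through)
{
  int flags = O_RDWR;
#ifdef O_DIRECT
  if (!buffered)
    flags |= O_DIRECT;   /* bypass the file system cache */
#endif
  if (write_through)
    flags |= O_DSYNC;    /* each write reaches durable storage before returning */
  return flags;
}

int main()
{
  /* Defaults in the patch: buffering and write-through both disabled. */
  std::printf("flags=%#x\n", data_file_open_flags(false, false));
  return 0;
}
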
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 1f42bf180a8..60b56b4a22f 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -105,10 +105,10 @@ public:
int close(void) override;
- double scan_time() override;
-
- double read_time(uint index, uint ranges, ha_rows rows) override;
-
+#ifdef NOT_USED
+ IO_AND_CPU_COST scan_time() override;
+ double rnd_pos_time(ha_rows rows) override;
+#endif
int write_row(const uchar * buf) override;
int update_row(const uchar * old_data, const uchar * new_data) override;
@@ -383,6 +383,7 @@ public:
uint n_ranges,
uint* bufsz,
uint* flags,
+ ha_rows limit,
Cost_estimate* cost) override;
/** Initialize multi range read and get information.
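
The header change above replaces the double-valued scan_time() and read_time() overrides with declarations, compiled out under NOT_USED, for an IO_AND_CPU_COST scan_time() and a rnd_pos_time(). A minimal sketch of such a split cost type and of how a full-scan estimate might be composed from it; the field names and cost constants here are assumptions, not the server's definitions:

#include <cstdio>

/* A cost split into an I/O component and a CPU component, mirroring the
   IO_AND_CPU_COST return type referenced above.  Field names are assumed. */
struct IO_AND_CPU_COST { double io; double cpu; };

/* Hypothetical full-scan estimate: charge per block read and per row examined. */
static IO_AND_CPU_COST example_scan_cost(double blocks, double rows,
                                         double io_cost_per_block,
                                         double cpu_cost_per_row)
{
  return { blocks * io_cost_per_block, rows * cpu_cost_per_row };
}

int main()
{
  IO_AND_CPU_COST c = example_scan_cost(1000, 50000, 1.0, 0.01);
  std::printf("io=%g cpu=%g total=%g\n", c.io, c.cpu, c.io + c.cpu);
  return 0;
}
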
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 59a8b005557..cfa5ed922da 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -2154,8 +2154,7 @@ next_page:
}
next_page= false;
- block= btr_block_get(*clust_index, next_page_no, BTR_SEARCH_LEAF, false,
- &mtr);
+ block= btr_block_get(*clust_index, next_page_no, BTR_SEARCH_LEAF, &mtr);
if (!block)
goto non_empty;
page_cur_set_before_first(block, cur);
@@ -10225,6 +10224,7 @@ commit_try_rebuild(
/* We must be still holding a table handle. */
DBUG_ASSERT(user_table->get_ref_count() == 1);
+ rebuilt_table->row_id = uint64_t{user_table->row_id};
DBUG_EXECUTE_IF("ib_rebuild_cannot_rename", error = DB_ERROR;);
switch (error) {
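
The added assignment above copies row_id from the old table object into the rebuilt one, presumably so that values handed out after the rebuild continue past those already carried over with the copied rows. A toy counter (plain C++, not InnoDB code) shows why restarting such a counter at zero would be wrong:

#include <cassert>
#include <cstdint>
#include <vector>

/* Toy model of a table whose rows carry a hidden, monotonically assigned id. */
struct Table
{
  uint64_t next_row_id = 0;
  std::vector<uint64_t> row_ids;
};

static uint64_t insert_row(Table &t)
{
  uint64_t id = t.next_row_id++;
  t.row_ids.push_back(id);
  return id;
}

int main()
{
  Table old_table;
  for (int i = 0; i < 3; i++)
    insert_row(old_table);                     /* ids 0, 1, 2 */

  Table rebuilt;
  rebuilt.row_ids = old_table.row_ids;         /* rows are copied on rebuild */
  rebuilt.next_row_id = old_table.next_row_id; /* the carried-over counter */

  assert(insert_row(rebuilt) == 3);            /* no clash with copied ids */
  return 0;
}
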
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 3b537afef40..589182b73ba 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2022, MariaDB Corporation.
+Copyright (c) 2014, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,7 +39,6 @@ Created July 18, 2007 Vasil Dimov
#include "dict0load.h"
#include "buf0buddy.h"
#include "buf0buf.h"
-#include "ibuf0ibuf.h"
#include "dict0mem.h"
#include "dict0types.h"
#include "srv0start.h"
@@ -80,10 +79,7 @@ in i_s_page_type[] array */
/** R-tree index page */
#define I_S_PAGE_TYPE_RTREE (FIL_PAGE_TYPE_LAST + 1)
-/** Change buffer B-tree page */
-#define I_S_PAGE_TYPE_IBUF (FIL_PAGE_TYPE_LAST + 2)
-
-#define I_S_PAGE_TYPE_LAST I_S_PAGE_TYPE_IBUF
+#define I_S_PAGE_TYPE_LAST I_S_PAGE_TYPE_RTREE
#define I_S_PAGE_TYPE_BITS 4
@@ -104,9 +100,6 @@ static buf_page_desc_t i_s_page_type[] = {
{"COMPRESSED_BLOB2", FIL_PAGE_TYPE_ZBLOB2},
{"UNKNOWN", I_S_PAGE_TYPE_UNKNOWN},
{"RTREE_INDEX", I_S_PAGE_TYPE_RTREE},
- {"IBUF_INDEX", I_S_PAGE_TYPE_IBUF},
- {"PAGE COMPRESSED", FIL_PAGE_PAGE_COMPRESSED},
- {"PAGE COMPRESSED AND ENCRYPTED", FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED},
};
/** This structure defines information we will fetch from pages
@@ -3776,17 +3769,17 @@ i_s_innodb_buffer_page_fill(
OK(fields[IDX_BUFFER_PAGE_STATE]->store(
std::min<uint32_t>(3, page_info->state) + 1, true));
- static_assert(buf_page_t::UNFIXED == 1U << 29, "comp.");
+ static_assert(buf_page_t::UNFIXED == 2U << 29, "comp.");
static_assert(buf_page_t::READ_FIX == 4U << 29, "comp.");
- static_assert(buf_page_t::WRITE_FIX == 5U << 29, "comp.");
+ static_assert(buf_page_t::WRITE_FIX == 6U << 29, "comp.");
unsigned io_fix = page_info->state >> 29;
if (io_fix < 4) {
io_fix = 1;
- } else if (io_fix > 5) {
- io_fix = 3;
+ } else if (io_fix == 4) {
+ io_fix = 2;
} else {
- io_fix -= 2;
+ io_fix = 3;
}
OK(fields[IDX_BUFFER_PAGE_IO_FIX]->store(io_fix, true));
@@ -3824,14 +3817,9 @@ i_s_innodb_set_page_type(
their values are defined as 17855 and 17854, so we cannot
use them to index into i_s_page_type[] array, its array index
in the i_s_page_type[] array is I_S_PAGE_TYPE_INDEX
- (1) for index pages or I_S_PAGE_TYPE_IBUF for
- change buffer index pages */
+ (1) for index pages */
if (page_type == FIL_PAGE_RTREE) {
page_info->page_type = I_S_PAGE_TYPE_RTREE;
- } else if (page_info->index_id
- == static_cast<index_id_t>(DICT_IBUF_ID_MIN
- + IBUF_SPACE_ID)) {
- page_info->page_type = I_S_PAGE_TYPE_IBUF;
} else {
ut_ad(page_type == FIL_PAGE_INDEX
|| page_type == FIL_PAGE_TYPE_INSTANT);
@@ -3876,9 +3864,9 @@ i_s_innodb_buffer_page_get_info(
static_assert(buf_page_t::NOT_USED == 0, "compatibility");
static_assert(buf_page_t::MEMORY == 1, "compatibility");
static_assert(buf_page_t::REMOVE_HASH == 2, "compatibility");
- static_assert(buf_page_t::UNFIXED == 1U << 29, "compatibility");
+ static_assert(buf_page_t::UNFIXED == 2U << 29, "compatibility");
static_assert(buf_page_t::READ_FIX == 4U << 29, "compatibility");
- static_assert(buf_page_t::WRITE_FIX == 5U << 29, "compatibility");
+ static_assert(buf_page_t::WRITE_FIX == 6U << 29, "compatibility");
page_info->state = bpage->state();
@@ -4268,17 +4256,17 @@ i_s_innodb_buf_page_lru_fill(
OK(fields[IDX_BUF_LRU_PAGE_STATE]->store(
page_info->compressed_only, true));
- static_assert(buf_page_t::UNFIXED == 1U << 29, "comp.");
+ static_assert(buf_page_t::UNFIXED == 2U << 29, "comp.");
static_assert(buf_page_t::READ_FIX == 4U << 29, "comp.");
- static_assert(buf_page_t::WRITE_FIX == 5U << 29, "comp.");
+ static_assert(buf_page_t::WRITE_FIX == 6U << 29, "comp.");
unsigned io_fix = page_info->state >> 29;
if (io_fix < 4) {
io_fix = 1;
- } else if (io_fix > 5) {
- io_fix = 3;
+ } else if (io_fix == 4) {
+ io_fix = 2;
} else {
- io_fix -= 2;
+ io_fix = 3;
}
OK(fields[IDX_BUF_LRU_PAGE_IO_FIX]->store(io_fix, true));
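
Both INFORMATION_SCHEMA fill routines above decode the I/O-fix state from the top three bits of the 32-bit page state word, using the renumbered constants (UNFIXED = 2U << 29, READ_FIX = 4U << 29, WRITE_FIX = 6U << 29). A self-contained copy of that decoding step, where the stored values 1, 2 and 3 stand for no pending I/O, a pending read and a pending write:

#include <cassert>
#include <cstdint>

/* State constants as asserted in the patch above. */
constexpr uint32_t UNFIXED   = 2U << 29;
constexpr uint32_t READ_FIX  = 4U << 29;
constexpr uint32_t WRITE_FIX = 6U << 29;

/* Map a page state word to the value stored in the IO_FIX column. */
static unsigned io_fix_column(uint32_t state)
{
  unsigned io_fix = state >> 29;
  if (io_fix < 4)
    return 1;          /* below READ_FIX: no I/O in progress */
  if (io_fix == 4)
    return 2;          /* READ_FIX: a read is in progress */
  return 3;            /* WRITE_FIX and above: a write is in progress */
}

int main()
{
  assert(io_fix_column(UNFIXED | 1) == 1);
  assert(io_fix_column(READ_FIX) == 2);
  assert(io_fix_column(WRITE_FIX) == 3);
  return 0;
}
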
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index e988a685678..5303b592c71 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -17,1259 +17,99 @@ this program; if not, write to the Free Software Foundation, Inc.,
*****************************************************************************/
-/**************************************************//**
+/**
@file ibuf/ibuf0ibuf.cc
-Insert buffer
-
-Created 7/19/1997 Heikki Tuuri
-*******************************************************/
+Upgrade and removal of the InnoDB change buffer
+*/
#include "ibuf0ibuf.h"
#include "btr0sea.h"
-
-/** Number of bits describing a single page */
-#define IBUF_BITS_PER_PAGE 4
-/** The start address for an insert buffer bitmap page bitmap */
-#define IBUF_BITMAP PAGE_DATA
-
-#include "buf0buf.h"
-#include "buf0rea.h"
-#include "fsp0fsp.h"
-#include "trx0sys.h"
-#include "fil0fil.h"
-#include "rem0rec.h"
-#include "btr0cur.h"
#include "btr0pcur.h"
-#include "btr0btr.h"
#include "row0upd.h"
-#include "dict0boot.h"
-#include "fut0lst.h"
-#include "lock0lock.h"
-#include "log0recv.h"
-#include "que0que.h"
-#include "srv0start.h" /* srv_shutdown_state */
-#include "rem0cmp.h"
+#include "my_service_manager.h"
#include "log.h"
-/* STRUCTURE OF AN INSERT BUFFER RECORD
+/** Possible operations buffered in the change buffer. */
+enum ibuf_op
+{
+ IBUF_OP_INSERT= 0,
+ IBUF_OP_DELETE_MARK= 1,
+ IBUF_OP_DELETE= 2,
+};
+
+constexpr const page_id_t ibuf_root{0, FSP_IBUF_TREE_ROOT_PAGE_NO};
+constexpr const page_id_t ibuf_header{0, FSP_IBUF_HEADER_PAGE_NO};
+constexpr const index_id_t ibuf_index_id{0xFFFFFFFF00000000ULL};
-In versions < 4.1.x:
+/* Format of the change buffer records:
+
+MySQL 3.23 and MySQL 4.0 (not supported since MySQL 5.6.5 and MariaDB 10.0.11):
1. The first field is the page number.
2. The second field is an array which stores type info for each subsequent
- field. We store the information which affects the ordering of records, and
+ field (4 bytes per column).
+ We store the information which affects the ordering of records, and
also the physical storage size of an SQL NULL value. E.g., for CHAR(10) it
is 10 bytes.
3. Next we have the fields of the actual index record.
-In versions >= 4.1.x:
-
-Note that contary to what we planned in the 1990's, there will only be one
-insert buffer tree, and that is in the system tablespace of InnoDB.
+MySQL 4.1:
1. The first field is the space id.
2. The second field is a one-byte marker (0) which differentiates records from
the < 4.1.x storage format.
3. The third field is the page number.
-4. The fourth field contains the type info, where we have also added 2 bytes to
- store the charset. In the compressed table format of 5.0.x we must add more
- information here so that we can build a dummy 'index' struct which 5.0.x
- can use in the binary search on the index page in the ibuf merge phase.
+4. The fourth field contains the type info
+ (6 bytes per index field, 16-bit collation information added).
+ Unless ROW_FORMAT=REDUNDANT, we add more metadata here so that
+ we can access records in the index page.
5. The rest of the fields contain the fields of the actual index record.
-In versions >= 5.0.3:
+MySQL 5.0 (starting with MySQL 5.0.3) and MySQL 5.1:
The first byte of the fourth field is an additional marker (0) if the record
-is in the compact format. The presence of this marker can be detected by
-looking at the length of the field modulo DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE.
+is not in ROW_FORMAT=REDUNDANT. The presence of this marker can be detected by
+looking at the length of the field modulo 6.
The high-order bit of the character set field in the type info is the
"nullable" flag for the field.
-In versions >= 5.5:
+MySQL 5.5 and MariaDB 5.5 and later:
-The optional marker byte at the start of the fourth field is replaced by
-mandatory 3 fields, totaling 4 bytes:
+Unless innodb_change_buffering=inserts, the optional marker byte at
+the start of the fourth field may be replaced by mandatory 3 fields,
+comprising 4 bytes:
1. 2 bytes: Counter field, used to sort records within a (space id, page
no) in the order they were added. This is needed so that for example the
sequence of operations "INSERT x, DEL MARK x, INSERT x" is handled
correctly.
- 2. 1 byte: Operation type (see ibuf_op_t).
+ 2. 1 byte: Operation type (see ibuf_op).
- 3. 1 byte: Flags. Currently only one flag exists, IBUF_REC_COMPACT.
-
-To ensure older records, which do not have counters to enforce correct
-sorting, are merged before any new records, ibuf_insert checks if we're
-trying to insert to a position that contains old-style records, and if so,
-refuses the insert. Thus, ibuf pages are gradually converted to the new
-format as their corresponding buffer pool pages are read into memory.
+ 3. 1 byte: 0=ROW_FORMAT=REDUNDANT, 1=other
*/
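
The rewritten comment above describes the metadata field of a buffered record: its length modulo 6 distinguishes the formats (0 or 1 for old insert-only records, 4 when the MySQL 5.5 counter/type/flag prefix is present), and 6 bytes of type information per index field follow. A minimal decoder for that prefix, assuming the counter is stored big-endian as elsewhere in InnoDB; the helper and struct names are made up and this is not part of the patch:

#include <cstddef>
#include <cstdint>

/* Matches the ibuf_op enum introduced above. */
enum ibuf_op { IBUF_OP_INSERT = 0, IBUF_OP_DELETE_MARK = 1, IBUF_OP_DELETE = 2 };

struct ibuf_rec_info
{
  size_t   info_len;      /* 0, 1 or 4 bytes preceding the type information */
  ibuf_op  op;            /* buffered operation */
  bool     not_redundant; /* false: the user index is ROW_FORMAT=REDUNDANT */
  uint16_t counter;       /* ordering counter within (space id, page number) */
};

/* Decode the start of the metadata field; len is the stored field length. */
static ibuf_rec_info decode_metadata(const uint8_t *field, size_t len)
{
  ibuf_rec_info info = {len % 6, IBUF_OP_INSERT, false, 0};
  if (info.info_len == 4)                 /* 5.5+ counter/type/flag prefix */
  {
    info.counter = uint16_t(field[0] << 8 | field[1]);
    info.op = ibuf_op(field[2]);
    info.not_redundant = field[3] != 0;
  }
  else                                    /* 0 or 1: old insert-only record */
    info.not_redundant = info.info_len == 1;
  return info;
}

int main()
{
  const uint8_t metadata[4] = {0x00, 0x07, IBUF_OP_DELETE_MARK, 1};
  ibuf_rec_info i = decode_metadata(metadata, 4 + 2 * 6); /* 2 user fields */
  return i.counter == 7 && i.op == IBUF_OP_DELETE_MARK && i.not_redundant
    ? 0 : 1;
}
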
-
-/* PREVENTING DEADLOCKS IN THE INSERT BUFFER SYSTEM
-
-If an OS thread performs any operation that brings in disk pages from
-non-system tablespaces into the buffer pool, or creates such a page there,
-then the operation may have as a side effect an insert buffer index tree
-compression. Thus, the tree latch of the insert buffer tree may be acquired
-in the x-mode, and also the file space latch of the system tablespace may
-be acquired in the x-mode.
-
-Also, an insert to an index in a non-system tablespace can have the same
-effect. How do we know this cannot lead to a deadlock of OS threads? There
-is a problem with the i\o-handler threads: they break the latching order
-because they own x-latches to pages which are on a lower level than the
-insert buffer tree latch, its page latches, and the tablespace latch an
-insert buffer operation can reserve.
-
-The solution is the following: Let all the tree and page latches connected
-with the insert buffer be later in the latching order than the fsp latch and
-fsp page latches.
-
-Insert buffer pages must be such that the insert buffer is never invoked
-when these pages are accessed as this would result in a recursion violating
-the latching order. We let a special i/o-handler thread take care of i/o to
-the insert buffer pages and the ibuf bitmap pages, as well as the fsp bitmap
-pages and the first inode page, which contains the inode of the ibuf tree: let
-us call all these ibuf pages. To prevent deadlocks, we do not let a read-ahead
-access both non-ibuf and ibuf pages.
-
-Then an i/o-handler for the insert buffer never needs to access recursively the
-insert buffer tree and thus obeys the latching order. On the other hand, other
-i/o-handlers for other tablespaces may require access to the insert buffer,
-but because all kinds of latches they need to access there are later in the
-latching order, no violation of the latching order occurs in this case,
-either.
-
-A problem is how to grow and contract an insert buffer tree. As it is later
-in the latching order than the fsp management, we have to reserve the fsp
-latch first, before adding or removing pages from the insert buffer tree.
-We let the insert buffer tree have its own file space management: a free
-list of pages linked to the tree root. To prevent recursive using of the
-insert buffer when adding pages to the tree, we must first load these pages
-to memory, obtaining a latch on them, and only after that add them to the
-free list of the insert buffer tree. More difficult is removing of pages
-from the free list. If there is an excess of pages in the free list of the
-ibuf tree, they might be needed if some thread reserves the fsp latch,
-intending to allocate more file space. So we do the following: if a thread
-reserves the fsp latch, we check the writer count field of the latch. If
-this field has value 1, it means that the thread did not own the latch
-before entering the fsp system, and the mtr of the thread contains no
-modifications to the fsp pages. Now we are free to reserve the ibuf latch,
-and check if there is an excess of pages in the free list. We can then, in a
-separate mini-transaction, take them out of the free list and free them to
-the fsp system.
-
-To avoid deadlocks in the ibuf system, we divide file pages into three levels:
-
-(1) non-ibuf pages,
-(2) ibuf tree pages and the pages in the ibuf tree free list, and
-(3) ibuf bitmap pages.
-
-No OS thread is allowed to access higher level pages if it has latches to
-lower level pages; even if the thread owns a B-tree latch it must not access
-the B-tree non-leaf pages if it has latches on lower level pages. Read-ahead
-is only allowed for level 1 and 2 pages. Dedicated i/o-handler threads handle
-exclusively level 1 i/o. A dedicated i/o handler thread handles exclusively
-level 2 i/o. However, if an OS thread does the i/o handling for itself, i.e.,
-it uses synchronous aio, it can access any pages, as long as it obeys the
-access order rules. */
-
-/** Operations that can currently be buffered. */
-ulong innodb_change_buffering;
-
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
-/** Dump the change buffer at startup */
-my_bool ibuf_dump;
-/** Flag to control insert buffer debugging. */
-uint ibuf_debug;
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-
-/** The insert buffer control structure */
-ibuf_t ibuf;
-
-/** @name Offsets to the per-page bits in the insert buffer bitmap */
-/* @{ */
-#define IBUF_BITMAP_FREE 0 /*!< Bits indicating the
- amount of free space */
-#define IBUF_BITMAP_BUFFERED 2 /*!< TRUE if there are buffered
- changes for the page */
-#define IBUF_BITMAP_IBUF 3 /*!< TRUE if page is a part of
- the ibuf tree, excluding the
- root page, or is in the free
- list of the ibuf */
-/* @} */
-
-#define IBUF_REC_FIELD_SPACE 0 /*!< in the pre-4.1 format,
- the page number. later, the space_id */
-#define IBUF_REC_FIELD_MARKER 1 /*!< starting with 4.1, a marker
- consisting of 1 byte that is 0 */
-#define IBUF_REC_FIELD_PAGE 2 /*!< starting with 4.1, the
- page number */
-#define IBUF_REC_FIELD_METADATA 3 /* the metadata field */
-#define IBUF_REC_FIELD_USER 4 /* first user field */
-
-/* Various constants for checking the type of an ibuf record and extracting
-data from it. For details, see the description of the record format at the
-top of this file. */
-
-/** @name Format of the IBUF_REC_FIELD_METADATA of an insert buffer record
-The fourth column in the MySQL 5.5 format contains an operation
-type, counter, and some flags. */
-/* @{ */
-#define IBUF_REC_INFO_SIZE 4 /*!< Combined size of info fields at
- the beginning of the fourth field */
-
-/* Offsets for the fields at the beginning of the fourth field */
-#define IBUF_REC_OFFSET_COUNTER 0 /*!< Operation counter */
-#define IBUF_REC_OFFSET_TYPE 2 /*!< Type of operation */
-#define IBUF_REC_OFFSET_FLAGS 3 /*!< Additional flags */
-
-/* Record flag masks */
-#define IBUF_REC_COMPACT 0x1 /*!< Set in
- IBUF_REC_OFFSET_FLAGS if the
- user index is in COMPACT
- format or later */
-
-
-#ifndef SAFE_MUTEX
-static
-#endif /* SAFE_MUTEX */
-/** The mutex protecting the insert buffer */
-mysql_mutex_t ibuf_mutex,
- /** The mutex covering pessimistic inserts into the change buffer */
- ibuf_pessimistic_insert_mutex;
-
-/** The area in pages from which contract looks for page numbers for merge */
-const ulint IBUF_MERGE_AREA = 8;
-
-/** Inside the merge area, pages which have at most 1 per this number less
-buffered entries compared to maximum volume that can buffered for a single
-page are merged along with the page whose buffer became full */
-const ulint IBUF_MERGE_THRESHOLD = 4;
-
-/** In ibuf_contract at most this number of pages is read to memory in one
-batch, in order to merge the entries for them in the insert buffer */
-const ulint IBUF_MAX_N_PAGES_MERGED = IBUF_MERGE_AREA;
-
-/** If the combined size of the ibuf trees exceeds ibuf.max_size by
-this many pages, we start to contract it synchronous contract, but do
-not insert */
-const ulint IBUF_CONTRACT_DO_NOT_INSERT = 10;
-
-/* TODO: how to cope with drop table if there are records in the insert
-buffer for the indexes of the table? Is there actually any problem,
-because ibuf merge is done to a page when it is read in, and it is
-still physically like the index page even if the index would have been
-dropped! So, there seems to be no problem. */
-
-/******************************************************************//**
-Sets the flag in the current mini-transaction record indicating we're
-inside an insert buffer routine. */
-UNIV_INLINE
-void
-ibuf_enter(
-/*=======*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- ut_ad(!mtr->is_inside_ibuf());
- mtr->enter_ibuf();
-}
-
-/******************************************************************//**
-Sets the flag in the current mini-transaction record indicating we're
-exiting an insert buffer routine. */
-UNIV_INLINE
-void
-ibuf_exit(
-/*======*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- ut_ad(mtr->is_inside_ibuf());
- mtr->exit_ibuf();
-}
-
-/**************************************************************//**
-Commits an insert buffer mini-transaction and sets the persistent
-cursor latch mode to BTR_NO_LATCHES, that is, detaches the cursor. */
-UNIV_INLINE
-void
-ibuf_btr_pcur_commit_specify_mtr(
-/*=============================*/
- btr_pcur_t* pcur, /*!< in/out: persistent cursor */
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- ut_d(ibuf_exit(mtr));
- btr_pcur_commit_specify_mtr(pcur, mtr);
-}
-
-/******************************************************************//**
-Gets the ibuf header page and x-latches it.
-@return insert buffer header page */
-static
-page_t*
-ibuf_header_page_get(
-/*=================*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- ut_ad(!ibuf_inside(mtr));
-
- buf_block_t* block = buf_page_get(
- page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO),
- 0, RW_X_LATCH, mtr);
-
- return block ? block->page.frame : nullptr;
-}
-
-/** Acquire the change buffer root page.
-@param[in,out] mtr mini-transaction
-@return change buffer root page, SX-latched */
-static buf_block_t *ibuf_tree_root_get(mtr_t *mtr, dberr_t *err= nullptr)
-{
- ut_ad(ibuf_inside(mtr));
- mysql_mutex_assert_owner(&ibuf_mutex);
-
- mtr_sx_lock_index(ibuf.index, mtr);
-
- buf_block_t *block=
- buf_page_get_gen(page_id_t{IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO},
- 0, RW_SX_LATCH, nullptr, BUF_GET, mtr, err);
- ut_ad(!block || ibuf.empty == page_is_empty(block->page.frame));
- return block;
-}
-
-/******************************************************************//**
-Closes insert buffer and frees the data structures. */
-void
-ibuf_close(void)
-/*============*/
-{
- if (!ibuf.index) {
- return;
- }
-
- mysql_mutex_destroy(&ibuf_pessimistic_insert_mutex);
- mysql_mutex_destroy(&ibuf_mutex);
-
- dict_table_t* ibuf_table = ibuf.index->table;
- ibuf.index->lock.free();
- dict_mem_index_free(ibuf.index);
- dict_mem_table_free(ibuf_table);
- ibuf.index = NULL;
-}
-
-/******************************************************************//**
-Updates the size information of the ibuf, assuming the segment size has not
-changed. */
-static
-void
-ibuf_size_update(
-/*=============*/
- const page_t* root) /*!< in: ibuf tree root */
-{
- mysql_mutex_assert_owner(&ibuf_mutex);
-
- ibuf.free_list_len = flst_get_len(root + PAGE_HEADER
- + PAGE_BTR_IBUF_FREE_LIST);
-
- ibuf.height = 1 + btr_page_get_level(root);
-
- /* the '1 +' is the ibuf header page */
- ibuf.size = ibuf.seg_size - (1 + ibuf.free_list_len);
-}
-
-/******************************************************************//**
-Creates the insert buffer data structure at a database startup and initializes
-the data structures for the insert buffer.
-@return DB_SUCCESS or failure */
-dberr_t
-ibuf_init_at_db_start(void)
-/*=======================*/
-{
- page_t* root;
-
- ut_ad(!ibuf.index);
- mtr_t mtr;
- mtr.start();
- compile_time_assert(IBUF_SPACE_ID == TRX_SYS_SPACE);
- compile_time_assert(IBUF_SPACE_ID == 0);
- mtr.x_lock_space(fil_system.sys_space);
- dberr_t err;
- buf_block_t* header_page = buf_page_get_gen(
- page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO),
- 0, RW_X_LATCH, nullptr, BUF_GET, &mtr, &err);
-
- if (!header_page) {
-err_exit:
- sql_print_error("InnoDB: The change buffer is corrupted"
- " or has been removed on upgrade"
- " to MariaDB 11.0 or later");
- mtr.commit();
- if (innodb_change_buffering == IBUF_USE_NONE) {
- err = DB_SUCCESS;
- }
- return err;
- }
-
- fseg_n_reserved_pages(*header_page,
- IBUF_HEADER + IBUF_TREE_SEG_HEADER
- + header_page->page.frame, &ibuf.seg_size, &mtr);
-
- do {
- DBUG_EXECUTE_IF("intermittent_read_failure", continue;);
- ut_ad(ibuf.seg_size >= 2);
- } while (0);
-
- if (buf_block_t* block =
- buf_page_get_gen(page_id_t(IBUF_SPACE_ID,
- FSP_IBUF_TREE_ROOT_PAGE_NO),
- 0, RW_X_LATCH, nullptr, BUF_GET, &mtr, &err)) {
- root = buf_block_get_frame(block);
- } else {
- goto err_exit;
- }
-
- DBUG_EXECUTE_IF("ibuf_init_corrupt",
- err = DB_CORRUPTION;
- goto err_exit;);
-
- if (page_is_comp(root) || fil_page_get_type(root) != FIL_PAGE_INDEX
- || btr_page_get_index_id(root) != DICT_IBUF_ID_MIN) {
- err = DB_CORRUPTION;
- goto err_exit;
- }
-
- /* At startup we intialize ibuf to have a maximum of
- CHANGE_BUFFER_DEFAULT_SIZE in terms of percentage of the
- buffer pool size. Once ibuf struct is initialized this
- value is updated with the user supplied size by calling
- ibuf_max_size_update(). */
- ibuf.max_size = ((buf_pool_get_curr_size() >> srv_page_size_shift)
- * CHANGE_BUFFER_DEFAULT_SIZE) / 100;
-
- mysql_mutex_init(ibuf_mutex_key, &ibuf_mutex, nullptr);
- mysql_mutex_init(ibuf_pessimistic_insert_mutex_key,
- &ibuf_pessimistic_insert_mutex, nullptr);
-
- mysql_mutex_lock(&ibuf_mutex);
- ibuf_size_update(root);
- mysql_mutex_unlock(&ibuf_mutex);
-
- ibuf.empty = page_is_empty(root);
- mtr.commit();
-
- ibuf.index = dict_mem_index_create(
- dict_table_t::create(
- {C_STRING_WITH_LEN("innodb_change_buffer")},
- fil_system.sys_space, 1, 0, 0, 0),
- "CLUST_IND",
- DICT_CLUSTERED | DICT_IBUF, 1);
- ibuf.index->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID;
- ibuf.index->n_uniq = REC_MAX_N_FIELDS;
- ibuf.index->lock.SRW_LOCK_INIT(index_tree_rw_lock_key);
-#ifdef BTR_CUR_ADAPT
- ibuf.index->search_info = btr_search_info_create(ibuf.index->heap);
-#endif /* BTR_CUR_ADAPT */
- ibuf.index->page = FSP_IBUF_TREE_ROOT_PAGE_NO;
- ut_d(ibuf.index->cached = TRUE);
-
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
- if (!ibuf_dump) {
- return DB_SUCCESS;
- }
- ib::info() << "Dumping the change buffer";
- ibuf_mtr_start(&mtr);
- btr_pcur_t pcur;
- if (DB_SUCCESS
- == pcur.open_leaf(true, ibuf.index, BTR_SEARCH_LEAF, &mtr)) {
- while (btr_pcur_move_to_next_user_rec(&pcur, &mtr)) {
- rec_print_old(stderr, btr_pcur_get_rec(&pcur));
- }
- }
- ibuf_mtr_commit(&mtr);
- ib::info() << "Dumped the change buffer";
-#endif
-
- return DB_SUCCESS;
-}
-
-/*********************************************************************//**
-Updates the max_size value for ibuf. */
-void
-ibuf_max_size_update(
-/*=================*/
- ulint new_val) /*!< in: new value in terms of
- percentage of the buffer pool size */
-{
- if (UNIV_UNLIKELY(!ibuf.index)) return;
- ulint new_size = ((buf_pool_get_curr_size() >> srv_page_size_shift)
- * new_val) / 100;
- mysql_mutex_lock(&ibuf_mutex);
- ibuf.max_size = new_size;
- mysql_mutex_unlock(&ibuf_mutex);
-}
-
-# ifdef UNIV_DEBUG
-/** Gets the desired bits for a given page from a bitmap page.
-@param[in] page bitmap page
-@param[in] page_id page id whose bits to get
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
-@param[in,out] mtr mini-transaction holding an x-latch on the
-bitmap page
-@return value of bits */
-# define ibuf_bitmap_page_get_bits(page, page_id, zip_size, bit, mtr) \
- ibuf_bitmap_page_get_bits_low(page, page_id, zip_size, \
- MTR_MEMO_PAGE_X_FIX, mtr, bit)
-# else /* UNIV_DEBUG */
-/** Gets the desired bits for a given page from a bitmap page.
-@param[in] page bitmap page
-@param[in] page_id page id whose bits to get
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
-@param[in,out] mtr mini-transaction holding an x-latch on the
-bitmap page
-@return value of bits */
-# define ibuf_bitmap_page_get_bits(page, page_id, zip_size, bit, mtr) \
- ibuf_bitmap_page_get_bits_low(page, page_id, zip_size, bit)
-# endif /* UNIV_DEBUG */
-
-/** Gets the desired bits for a given page from a bitmap page.
-@param[in] page bitmap page
-@param[in] page_id page id whose bits to get
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] latch_type MTR_MEMO_PAGE_X_FIX, MTR_MEMO_BUF_FIX, ...
-@param[in,out] mtr mini-transaction holding latch_type on the
-bitmap page
-@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
-@return value of bits */
-UNIV_INLINE
-ulint
-ibuf_bitmap_page_get_bits_low(
- const page_t* page,
- const page_id_t page_id,
- ulint zip_size,
-#ifdef UNIV_DEBUG
- ulint latch_type,
- mtr_t* mtr,
-#endif /* UNIV_DEBUG */
- ulint bit)
-{
- ulint byte_offset;
- ulint bit_offset;
- ulint map_byte;
- ulint value;
- const ulint size = zip_size ? zip_size : srv_page_size;
-
- ut_ad(ut_is_2pow(zip_size));
- ut_ad(bit < IBUF_BITS_PER_PAGE);
- compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
- ut_ad(mtr->memo_contains_page_flagged(page, latch_type));
-
- bit_offset = (page_id.page_no() & (size - 1))
- * IBUF_BITS_PER_PAGE + bit;
-
- byte_offset = bit_offset / 8;
- bit_offset = bit_offset % 8;
-
- ut_ad(byte_offset + IBUF_BITMAP < srv_page_size);
-
- map_byte = mach_read_from_1(page + IBUF_BITMAP + byte_offset);
-
- value = ut_bit_get_nth(map_byte, bit_offset);
-
- if (bit == IBUF_BITMAP_FREE) {
- ut_ad(bit_offset + 1 < 8);
-
- value = value * 2 + ut_bit_get_nth(map_byte, bit_offset + 1);
- }
-
- return(value);
-}
-
-/** Sets the desired bit for a given page in a bitmap page.
-@tparam bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
-@param[in,out] block bitmap page
-@param[in] page_id page id whose bits to set
-@param[in] physical_size page size
-@param[in] val value to set
-@param[in,out] mtr mtr containing an x-latch to the bitmap page */
-template<ulint bit>
-static void
-ibuf_bitmap_page_set_bits(
- buf_block_t* block,
- const page_id_t page_id,
- ulint physical_size,
- ulint val,
- mtr_t* mtr)
-{
- ulint byte_offset;
- ulint bit_offset;
-
- static_assert(bit < IBUF_BITS_PER_PAGE, "wrong bit");
- compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
- ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
- ut_ad(mtr->is_named_space(page_id.space()));
-
- bit_offset = (page_id.page_no() % physical_size)
- * IBUF_BITS_PER_PAGE + bit;
-
- byte_offset = bit_offset / 8;
- bit_offset = bit_offset % 8;
-
- ut_ad(byte_offset + IBUF_BITMAP < srv_page_size);
-
- byte* map_byte = &block->page.frame[IBUF_BITMAP + byte_offset];
- byte b = *map_byte;
-
- if (bit == IBUF_BITMAP_FREE) {
- ut_ad(bit_offset + 1 < 8);
- ut_ad(val <= 3);
- b &= static_cast<byte>(~(3U << bit_offset));
- b |= static_cast<byte>(((val & 2) >> 1) << bit_offset
- | (val & 1) << (bit_offset + 1));
- } else {
- ut_ad(val <= 1);
- b &= static_cast<byte>(~(1U << bit_offset));
-#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
-# pragma GCC diagnostic push
-# pragma GCC diagnostic ignored "-Wconversion" /* GCC 5 may need this here */
-#endif
- b |= static_cast<byte>(val << bit_offset);
-#if defined __GNUC__ && !defined __clang__ && __GNUC__ < 6
-# pragma GCC diagnostic pop
-#endif
- }
-
- mtr->write<1,mtr_t::MAYBE_NOP>(*block, map_byte, b);
-}
-
-/** Calculates the bitmap page number for a given page number.
-@param[in] page_id page id
-@param[in] size page size
-@return the bitmap page id where the file page is mapped */
-inline page_id_t ibuf_bitmap_page_no_calc(const page_id_t page_id, ulint size)
-{
- if (!size)
- size= srv_page_size;
-
- return page_id_t(page_id.space(), FSP_IBUF_BITMAP_OFFSET
- + uint32_t(page_id.page_no() & ~(size - 1)));
-}
-
-/** Gets the ibuf bitmap page where the bits describing a given file page are
-stored.
-@param[in] page_id page id of the file page
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in,out] mtr mini-transaction
-@return bitmap page where the file page is mapped, that is, the bitmap
-page containing the descriptor bits for the file page; the bitmap page
-is x-latched */
-static
-buf_block_t*
-ibuf_bitmap_get_map_page(
- const page_id_t page_id,
- ulint zip_size,
- mtr_t* mtr)
-{
- return buf_page_get_gen(ibuf_bitmap_page_no_calc(page_id, zip_size),
- zip_size, RW_X_LATCH, nullptr,
- BUF_GET_POSSIBLY_FREED, mtr);
-}
-
-/************************************************************************//**
-Sets the free bits of the page in the ibuf bitmap. This is done in a separate
-mini-transaction, hence this operation does not restrict further work to only
-ibuf bitmap operations, which would result if the latch to the bitmap page
-were kept. */
-UNIV_INLINE
-void
-ibuf_set_free_bits_low(
-/*===================*/
- const buf_block_t* block, /*!< in: index page; free bits are set if
- the index is non-clustered and page
- level is 0 */
- ulint val, /*!< in: value to set: < 4 */
- mtr_t* mtr) /*!< in/out: mtr */
-{
- ut_ad(mtr->is_named_space(block->page.id().space()));
- if (!page_is_leaf(block->page.frame)) {
- return;
- }
-
-#ifdef UNIV_IBUF_DEBUG
- ut_a(val <= ibuf_index_page_calc_free(block));
-#endif /* UNIV_IBUF_DEBUG */
- const page_id_t id(block->page.id());
-
- if (buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
- id, block->zip_size(), mtr)) {
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
- bitmap_page, id, block->physical_size(),
- val, mtr);
- }
-}
-
-/************************************************************************//**
-Sets the free bit of the page in the ibuf bitmap. This is done in a separate
-mini-transaction, hence this operation does not restrict further work to only
-ibuf bitmap operations, which would result if the latch to the bitmap page
-were kept. */
-void
-ibuf_set_free_bits_func(
-/*====================*/
- buf_block_t* block, /*!< in: index page of a non-clustered index;
- free bit is reset if page level is 0 */
-#ifdef UNIV_IBUF_DEBUG
- ulint max_val,/*!< in: ULINT_UNDEFINED or a maximum
- value which the bits must have before
- setting; this is for debugging */
-#endif /* UNIV_IBUF_DEBUG */
- ulint val) /*!< in: value to set: < 4 */
-{
- if (!page_is_leaf(block->page.frame))
- return;
-
- mtr_t mtr;
- mtr.start();
- const page_id_t id(block->page.id());
- const fil_space_t *space= mtr.set_named_space_id(id.space());
-
- if (buf_block_t *bitmap_page=
- ibuf_bitmap_get_map_page(id, block->zip_size(), &mtr))
- {
- if (space->purpose != FIL_TYPE_TABLESPACE)
- mtr.set_log_mode(MTR_LOG_NO_REDO);
-
-#ifdef UNIV_IBUF_DEBUG
- if (max_val != ULINT_UNDEFINED)
- {
- ulint old_val= ibuf_bitmap_page_get_bits(bitmap_page, id,
- IBUF_BITMAP_FREE, &mtr);
- ut_a(old_val <= max_val);
- }
-
- ut_a(val <= ibuf_index_page_calc_free(block));
-#endif /* UNIV_IBUF_DEBUG */
-
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>
- (bitmap_page, id, block->physical_size(), val, &mtr);
- }
-
- mtr.commit();
-}
-
-/************************************************************************//**
-Resets the free bits of the page in the ibuf bitmap. This is done in a
-separate mini-transaction, hence this operation does not restrict
-further work to only ibuf bitmap operations, which would result if the
-latch to the bitmap page were kept. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is safe
-to decrement or reset the bits in the bitmap in a mini-transaction
-that is committed before the mini-transaction that affects the free
-space. */
-void
-ibuf_reset_free_bits(
-/*=================*/
- buf_block_t* block) /*!< in: index page; free bits are set to 0
- if the index is a non-clustered
- non-unique, and page level is 0 */
-{
- ibuf_set_free_bits(block, 0, ULINT_UNDEFINED);
-}
-
-/**********************************************************************//**
-Updates the free bits for an uncompressed page to reflect the present
-state. Does this in the mtr given, which means that the latching
-order rules virtually prevent any further operations for this OS
-thread until mtr is committed. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is safe
-to set the free bits in the same mini-transaction that updated the
-page. */
-void
-ibuf_update_free_bits_low(
-/*======================*/
- const buf_block_t* block, /*!< in: index page */
- ulint max_ins_size, /*!< in: value of
- maximum insert size
- with reorganize before
- the latest operation
- performed to the page */
- mtr_t* mtr) /*!< in/out: mtr */
-{
- ulint before;
- ulint after;
-
- ut_a(!is_buf_block_get_page_zip(block));
- ut_ad(mtr->is_named_space(block->page.id().space()));
-
- before = ibuf_index_page_calc_free_bits(srv_page_size,
- max_ins_size);
-
- after = ibuf_index_page_calc_free(block);
-
- /* This approach cannot be used on compressed pages, since the
- computed value of "before" often does not match the current
- state of the bitmap. This is because the free space may
- increase or decrease when a compressed page is reorganized. */
- if (before != after) {
- ibuf_set_free_bits_low(block, after, mtr);
- }
-}
-
-/**********************************************************************//**
-Updates the free bits for a compressed page to reflect the present
-state. Does this in the mtr given, which means that the latching
-order rules virtually prevent any further operations for this OS
-thread until mtr is committed. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is safe
-to set the free bits in the same mini-transaction that updated the
-page. */
-void
-ibuf_update_free_bits_zip(
-/*======================*/
- buf_block_t* block, /*!< in/out: index page */
- mtr_t* mtr) /*!< in/out: mtr */
-{
- ut_ad(page_is_leaf(block->page.frame));
- ut_ad(block->zip_size());
-
- ulint after = ibuf_index_page_calc_free_zip(block);
-
- if (after == 0) {
- /* We move the page to the front of the buffer pool LRU list:
- the purpose of this is to prevent those pages to which we
- cannot make inserts using the insert buffer from slipping
- out of the buffer pool */
-
- buf_page_make_young(&block->page);
- }
-
- if (buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
- block->page.id(), block->zip_size(), mtr)) {
-
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(
- bitmap_page, block->page.id(),
- block->physical_size(), after, mtr);
- }
-}
-
-/**********************************************************************//**
-Updates the free bits for the two pages to reflect the present state.
-Does this in the mtr given, which means that the latching order rules
-virtually prevent any further operations until mtr is committed.
-NOTE: The free bits in the insert buffer bitmap must never exceed the
-free space on a page. It is safe to set the free bits in the same
-mini-transaction that updated the pages. */
-void
-ibuf_update_free_bits_for_two_pages_low(
-/*====================================*/
- buf_block_t* block1, /*!< in: index page */
- buf_block_t* block2, /*!< in: index page */
- mtr_t* mtr) /*!< in: mtr */
-{
- ut_ad(mtr->is_named_space(block1->page.id().space()));
- ut_ad(block1->page.id().space() == block2->page.id().space());
-
- /* Avoid deadlocks by acquiring multiple bitmap page latches in
- a consistent order (smaller pointer first). */
- if (block1 > block2)
- std::swap(block1, block2);
-
- ibuf_set_free_bits_low(block1, ibuf_index_page_calc_free(block1), mtr);
- ibuf_set_free_bits_low(block2, ibuf_index_page_calc_free(block2), mtr);
-}
-
-/** Returns TRUE if the page is one of the fixed address ibuf pages.
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@return TRUE if a fixed address ibuf i/o page */
-inline bool ibuf_fixed_addr_page(const page_id_t page_id, ulint zip_size)
-{
- return(page_id == page_id_t(IBUF_SPACE_ID, IBUF_TREE_ROOT_PAGE_NO)
- || ibuf_bitmap_page(page_id, zip_size));
-}
-
-/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
-Must not be called when recv_no_ibuf_operations==true.
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] x_latch FALSE if relaxed check (avoid latching the
-bitmap page)
-@param[in,out] mtr mtr which will contain an x-latch to the
-bitmap page if the page is not one of the fixed address ibuf pages, or NULL,
-in which case a new transaction is created.
-@return TRUE if level 2 or level 3 page */
-bool
-ibuf_page_low(
- const page_id_t page_id,
- ulint zip_size,
-#ifdef UNIV_DEBUG
- bool x_latch,
-#endif /* UNIV_DEBUG */
- mtr_t* mtr)
-{
- ibool ret;
- mtr_t local_mtr;
-
- ut_ad(!recv_no_ibuf_operations);
- ut_ad(x_latch || mtr == NULL);
-
- if (ibuf_fixed_addr_page(page_id, zip_size)) {
- return(true);
- } else if (page_id.space() != IBUF_SPACE_ID) {
- return(false);
- }
-
- compile_time_assert(IBUF_SPACE_ID == 0);
- ut_ad(fil_system.sys_space->purpose == FIL_TYPE_TABLESPACE);
-
-#ifdef UNIV_DEBUG
- if (!x_latch) {
- mtr_start(&local_mtr);
-
- /* Get the bitmap page without a page latch, so that
- we will not be violating the latching order when
- another bitmap page has already been latched by this
- thread. The page will be buffer-fixed, and thus it
- cannot be removed or relocated while we are looking at
- it. The contents of the page could change, but the
- IBUF_BITMAP_IBUF bit that we are interested in should
- not be modified by any other thread. Nobody should be
- calling ibuf_add_free_page() or ibuf_remove_free_page()
- while the page is linked to the insert buffer b-tree. */
- buf_block_t* block = buf_page_get_gen(
- ibuf_bitmap_page_no_calc(page_id, zip_size),
- zip_size, RW_NO_LATCH, nullptr, BUF_GET, &local_mtr);
-
- ret = block
- && ibuf_bitmap_page_get_bits_low(
- block->page.frame, page_id, zip_size,
- MTR_MEMO_BUF_FIX, &local_mtr, IBUF_BITMAP_IBUF);
-
- mtr_commit(&local_mtr);
- return(ret);
- }
-#endif /* UNIV_DEBUG */
-
- if (mtr == NULL) {
- mtr = &local_mtr;
- mtr_start(mtr);
- }
-
- buf_block_t *block = ibuf_bitmap_get_map_page(page_id, zip_size,
- mtr);
- ret = block
- && ibuf_bitmap_page_get_bits(block->page.frame,
- page_id, zip_size,
- IBUF_BITMAP_IBUF, mtr);
-
- if (mtr == &local_mtr) {
- mtr_commit(mtr);
- }
-
- return(ret);
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_rec_get_page_no(mtr,rec) ibuf_rec_get_page_no_func(mtr,rec)
-#else /* UNIV_DEBUG */
-# define ibuf_rec_get_page_no(mtr,rec) ibuf_rec_get_page_no_func(rec)
-#endif /* UNIV_DEBUG */
+/** first user record field */
+constexpr unsigned IBUF_REC_FIELD_USER= 4;
/********************************************************************//**
Returns the page number field of an ibuf record.
@return page number */
-static
-uint32_t
-ibuf_rec_get_page_no_func(
-/*======================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec) /*!< in: ibuf record */
+static uint32_t ibuf_rec_get_page_no(const rec_t *rec)
{
- const byte* field;
- ulint len;
-
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
- ut_ad(rec_get_n_fields_old(rec) > 2);
-
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_MARKER, &len);
-
- ut_a(len == 1);
-
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_PAGE, &len);
-
- ut_a(len == 4);
-
- return(mach_read_from_4(field));
+ return mach_read_from_4(rec + 5);
}
-#ifdef UNIV_DEBUG
-# define ibuf_rec_get_space(mtr,rec) ibuf_rec_get_space_func(mtr,rec)
-#else /* UNIV_DEBUG */
-# define ibuf_rec_get_space(mtr,rec) ibuf_rec_get_space_func(rec)
-#endif /* UNIV_DEBUG */
-
/********************************************************************//**
-Returns the space id field of an ibuf record. For < 4.1.x format records
-returns 0.
+Returns the space id field of an ibuf record.
@return space id */
-static
-uint32_t
-ibuf_rec_get_space_func(
-/*====================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec) /*!< in: ibuf record */
-{
- const byte* field;
- ulint len;
-
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
- ut_ad(rec_get_n_fields_old(rec) > 2);
-
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_MARKER, &len);
-
- ut_a(len == 1);
-
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_SPACE, &len);
-
- ut_a(len == 4);
-
- return(mach_read_from_4(field));
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_rec_get_info(mtr,rec,op,comp,info_len,counter) \
- ibuf_rec_get_info_func(mtr,rec,op,comp,info_len,counter)
-#else /* UNIV_DEBUG */
-# define ibuf_rec_get_info(mtr,rec,op,comp,info_len,counter) \
- ibuf_rec_get_info_func(rec,op,comp,info_len,counter)
-#endif
-/****************************************************************//**
-Get various information about an ibuf record in >= 4.1.x format. */
-static
-void
-ibuf_rec_get_info_func(
-/*===================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec, /*!< in: ibuf record */
- ibuf_op_t* op, /*!< out: operation type, or NULL */
- ibool* comp, /*!< out: compact flag, or NULL */
- ulint* info_len, /*!< out: length of info fields at the
- start of the fourth field, or
- NULL */
- ulint* counter) /*!< in: counter value, or NULL */
-{
- const byte* types;
- ulint fields;
- ulint len;
-
- /* Local variables to shadow arguments. */
- ibuf_op_t op_local;
- ibool comp_local;
- ulint info_len_local;
- ulint counter_local;
-
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
- fields = rec_get_n_fields_old(rec);
- ut_a(fields > IBUF_REC_FIELD_USER);
-
- types = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len);
-
- info_len_local = len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE;
- compile_time_assert(IBUF_REC_INFO_SIZE
- < DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE);
-
- switch (info_len_local) {
- case 0:
- case 1:
- op_local = IBUF_OP_INSERT;
- comp_local = info_len_local;
- ut_ad(!counter);
- counter_local = ULINT_UNDEFINED;
- break;
-
- case IBUF_REC_INFO_SIZE:
- op_local = (ibuf_op_t) types[IBUF_REC_OFFSET_TYPE];
- comp_local = types[IBUF_REC_OFFSET_FLAGS] & IBUF_REC_COMPACT;
- counter_local = mach_read_from_2(
- types + IBUF_REC_OFFSET_COUNTER);
- break;
-
- default:
- ut_error;
- }
-
- ut_a(op_local < IBUF_OP_COUNT);
- ut_a((len - info_len_local) ==
- (fields - IBUF_REC_FIELD_USER)
- * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE);
-
- if (op) {
- *op = op_local;
- }
-
- if (comp) {
- *comp = comp_local;
- }
-
- if (info_len) {
- *info_len = info_len_local;
- }
-
- if (counter) {
- *counter = counter_local;
- }
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_rec_get_op_type(mtr,rec) ibuf_rec_get_op_type_func(mtr,rec)
-#else /* UNIV_DEBUG */
-# define ibuf_rec_get_op_type(mtr,rec) ibuf_rec_get_op_type_func(rec)
-#endif
-
-/****************************************************************//**
-Returns the operation type field of an ibuf record.
-@return operation type */
-static
-ibuf_op_t
-ibuf_rec_get_op_type_func(
-/*======================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec) /*!< in: ibuf record */
-{
- ulint len;
-
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
- ut_ad(rec_get_n_fields_old(rec) > 2);
-
- (void) rec_get_nth_field_old(rec, IBUF_REC_FIELD_MARKER, &len);
-
- if (len > 1) {
- /* This is a < 4.1.x format record */
-
- return(IBUF_OP_INSERT);
- } else {
- ibuf_op_t op;
-
- ibuf_rec_get_info(mtr, rec, &op, NULL, NULL, NULL);
-
- return(op);
- }
-}
-
-/****************************************************************//**
-Read the first two bytes from a record's fourth field (counter field in new
-records; something else in older records).
-@return "counter" field, or ULINT_UNDEFINED if for some reason it
-can't be read */
-ulint
-ibuf_rec_get_counter(
-/*=================*/
- const rec_t* rec) /*!< in: ibuf record */
-{
- const byte* ptr;
- ulint len;
-
- if (rec_get_n_fields_old(rec) <= IBUF_REC_FIELD_METADATA) {
-
- return(ULINT_UNDEFINED);
- }
-
- ptr = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len);
-
- if (len >= 2) {
-
- return(mach_read_from_2(ptr));
- } else {
-
- return(ULINT_UNDEFINED);
- }
-}
-
-
-/**
- Add accumulated operation counts to a permanent array.
- Both arrays must be of size IBUF_OP_COUNT.
-*/
-static void ibuf_add_ops(Atomic_counter<ulint> *out, const ulint *in)
+static uint32_t ibuf_rec_get_space(const rec_t *rec)
{
- for (auto i = 0; i < IBUF_OP_COUNT; i++)
- out[i]+= in[i];
-}
-
-
-/****************************************************************//**
-Print operation counts. The array must be of size IBUF_OP_COUNT. */
-static
-void
-ibuf_print_ops(
-/*===========*/
- const char* op_name,/*!< in: operation name */
- const Atomic_counter<ulint>* ops, /*!< in: operation counts */
- FILE* file) /*!< in: file where to print */
-{
- static const char* op_names[] = {
- "insert",
- "delete mark",
- "delete"
- };
-
- static_assert(array_elements(op_names) == IBUF_OP_COUNT, "");
- fputs(op_name, file);
-
- for (ulint i = 0; i < IBUF_OP_COUNT; i++) {
- fprintf(file, "%s " ULINTPF "%s", op_names[i],
- ulint{ops[i]}, (i < (IBUF_OP_COUNT - 1)) ? ", " : "");
- }
-
- putc('\n', file);
+ return mach_read_from_4(rec);
}
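
The two simplified getters above rely on the fixed layout of the first three fields of a post-4.1 change buffer record in ROW_FORMAT=REDUNDANT: a 4-byte space id at the record origin, a single zero marker byte, then the 4-byte page number, which therefore starts at offset 5. A standalone sketch of the same decoding, not part of the patch:

#include <cassert>
#include <cstdint>

/* Big-endian 32-bit read, the same byte order as mach_read_from_4(). */
static uint32_t read_be32(const uint8_t *b)
{
  return uint32_t(b[0]) << 24 | uint32_t(b[1]) << 16
       | uint32_t(b[2]) << 8  | uint32_t(b[3]);
}

int main()
{
  /* Leading bytes of a change buffer record:
     space id (4 bytes), marker byte 0, page number (4 bytes). */
  const uint8_t rec[9] = {0, 0, 0, 5,  0,  0, 0, 0x30, 0x39};
  assert(read_be32(rec) == 5);           /* ibuf_rec_get_space() */
  assert(rec[4] == 0);                   /* the format marker */
  assert(read_be32(rec + 5) == 12345);   /* ibuf_rec_get_page_no() */
  return 0;
}
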
/********************************************************************//**
-Creates a dummy index for inserting a record to a non-clustered index.
-@return dummy index */
-static
-dict_index_t*
-ibuf_dummy_index_create(
-/*====================*/
- ulint n, /*!< in: number of fields */
- ibool comp) /*!< in: TRUE=use compact record format */
-{
- dict_table_t* table;
- dict_index_t* index;
-
- table = dict_table_t::create({C_STRING_WITH_LEN("IBUF_DUMMY")},
- nullptr, n, 0,
- comp ? DICT_TF_COMPACT : 0, 0);
-
- index = dict_mem_index_create(table, "IBUF_DUMMY", 0, n);
-
- /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
- index->cached = TRUE;
- ut_d(index->is_dummy = true);
-
- return(index);
-}
-/********************************************************************//**
Add a column to the dummy index */
static
void
@@ -1287,93 +127,79 @@ ibuf_dummy_index_add_col(
dict_index_add_col(index, index->table,
dict_table_get_nth_col(index->table, i), len);
}
-/********************************************************************//**
-Deallocates a dummy index for inserting a record to a non-clustered index. */
+
+/**********************************************************************//**
+Reads to a type the stored information which determines its alphabetical
+ordering and the storage size of an SQL NULL value. This is the >= 4.1.x
+storage format. */
static
void
-ibuf_dummy_index_free(
-/*==================*/
- dict_index_t* index) /*!< in, own: dummy index */
+dtype_new_read_for_order_and_null_size(
+/*===================================*/
+ dtype_t* type, /*!< in: type struct */
+ const byte* buf) /*!< in: buffer for stored type order info */
{
- dict_table_t* table = index->table;
-
- dict_mem_index_free(index);
- dict_mem_table_free(table);
-}
+ type->mtype = buf[0] & 63;
+ type->prtype = buf[1];
-#ifdef UNIV_DEBUG
-# define ibuf_build_entry_from_ibuf_rec(mtr,ibuf_rec,heap,pindex) \
- ibuf_build_entry_from_ibuf_rec_func(mtr,ibuf_rec,heap,pindex)
-#else /* UNIV_DEBUG */
-# define ibuf_build_entry_from_ibuf_rec(mtr,ibuf_rec,heap,pindex) \
- ibuf_build_entry_from_ibuf_rec_func(ibuf_rec,heap,pindex)
-#endif
+ if (buf[0] & 128) {
+ type->prtype |= DATA_BINARY_TYPE;
+ }
-/*********************************************************************//**
-Builds the entry used to
+ if (buf[4] & 128) {
+ type->prtype |= DATA_NOT_NULL;
+ }
-1) IBUF_OP_INSERT: insert into a non-clustered index
+ type->len = mach_read_from_2(buf + 2);
-2) IBUF_OP_DELETE_MARK: find the record whose delete-mark flag we need to
- activate
+ uint32_t charset_coll = (mach_read_from_2(buf + 4) & CHAR_COLL_MASK)
+ << 16;
-3) IBUF_OP_DELETE: find the record we need to delete
+ if (dtype_is_string_type(type->mtype)) {
+ type->prtype |= charset_coll;
-when we have the corresponding record in an ibuf index.
+ if (charset_coll == 0) {
+ /* This insert buffer record was inserted before
+ MySQL 4.1.2, and the charset-collation code was not
+ explicitly stored to dtype->prtype at that time. It
+ must be the default charset-collation of this MySQL
+ installation. */
+ type->prtype |= default_charset_info->number << 16;
+ }
+ }
-NOTE that as we copy pointers to fields in ibuf_rec, the caller must
-hold a latch to the ibuf_rec page as long as the entry is used!
+ dtype_set_mblen(type);
+}
-@return own: entry to insert to a non-clustered index */
-static
-dtuple_t*
-ibuf_build_entry_from_ibuf_rec_func(
-/*================================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* ibuf_rec, /*!< in: record in an insert buffer */
- mem_heap_t* heap, /*!< in: heap where built */
- dict_index_t** pindex) /*!< out, own: dummy index that
- describes the entry */
+/** Construct an index entry and an index for applying an operation.
+@param ibuf_rec change buffer record in an X-latched page
+@param not_redundant whether another format than ROW_FORMAT=REDUNDANT is used
+@param n_fields number of index record fields
+@param types type information
+@param heap memory heap
+@param index dummy index metadata
+@return the index entry for applying the operation */
+static dtuple_t *ibuf_entry_build(const rec_t *ibuf_rec, ulint not_redundant,
+ ulint n_fields, const byte *types,
+ mem_heap_t *heap, dict_index_t *&index)
{
dtuple_t* tuple;
dfield_t* field;
- ulint n_fields;
- const byte* types;
const byte* data;
ulint len;
- ulint info_len;
- ulint i;
- ulint comp;
- dict_index_t* index;
-
- ut_ad(mtr->memo_contains_page_flagged(ibuf_rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
-
- data = rec_get_nth_field_old(ibuf_rec, IBUF_REC_FIELD_MARKER, &len);
-
- ut_a(len == 1);
- ut_a(*data == 0);
- ut_a(rec_get_n_fields_old(ibuf_rec) > IBUF_REC_FIELD_USER);
-
- n_fields = rec_get_n_fields_old(ibuf_rec) - IBUF_REC_FIELD_USER;
tuple = dtuple_create(heap, n_fields);
- types = rec_get_nth_field_old(ibuf_rec, IBUF_REC_FIELD_METADATA, &len);
-
- ibuf_rec_get_info(mtr, ibuf_rec, NULL, &comp, &info_len, NULL);
-
- index = ibuf_dummy_index_create(n_fields, comp);
-
- len -= info_len;
- types += info_len;
-
- ut_a(len == n_fields * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE);
+ index = dict_mem_index_create(
+ dict_table_t::create({C_STRING_WITH_LEN("")}, nullptr,
+ n_fields, 0,
+ not_redundant ? DICT_TF_COMPACT : 0, 0),
+ "IBUF_DUMMY", 0, n_fields);
+ /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
+ ut_d(index->cached = true);
+ ut_d(index->is_dummy = true);
- for (i = 0; i < n_fields; i++) {
+ for (ulint i = 0; i < n_fields; i++) {
field = dtuple_get_nth_field(tuple, i);
data = rec_get_nth_field_old(
@@ -1382,8 +208,7 @@ ibuf_build_entry_from_ibuf_rec_func(
dfield_set_data(field, data, len);
dtype_new_read_for_order_and_null_size(
- dfield_get_type(field),
- types + i * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE);
+ dfield_get_type(field), types + i * 6);
ibuf_dummy_index_add_col(index, dfield_get_type(field), len);
}
@@ -1393,2220 +218,82 @@ ibuf_build_entry_from_ibuf_rec_func(
/* Prevent an ut_ad() failure in page_zip_write_rec() by
adding system columns to the dummy table pointed to by the
- dummy secondary index. The insert buffer is only used for
+ dummy secondary index. The change buffer was only used for
secondary indexes, whose records never contain any system
columns, such as DB_TRX_ID. */
ut_d(dict_table_add_system_columns(index->table, index->table->heap));
-
- *pindex = index;
-
return(tuple);
}
-/******************************************************************//**
-Get the data size.
-@return size of fields */
-UNIV_INLINE
-ulint
-ibuf_rec_get_size(
-/*==============*/
- const rec_t* rec, /*!< in: ibuf record */
- const byte* types, /*!< in: fields */
- ulint n_fields, /*!< in: number of fields */
- ulint comp) /*!< in: 0=ROW_FORMAT=REDUNDANT,
- nonzero=ROW_FORMAT=COMPACT */
+/** Removes a page from the free list and frees it to the fsp system.
+@param mtr mini-transaction
+@return error code
+@retval DB_SUCCESS if more work may remain to be done
+@retval DB_SUCCESS_LOCKED_REC if everything was freed */
+ATTRIBUTE_COLD static dberr_t ibuf_remove_free_page(mtr_t &mtr)
{
- ulint i;
- ulint field_offset;
- ulint types_offset;
- ulint size = 0;
-
- field_offset = IBUF_REC_FIELD_USER;
- types_offset = DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE;
-
- for (i = 0; i < n_fields; i++) {
- ulint len;
- dtype_t dtype;
-
- rec_get_nth_field_offs_old(rec, i + field_offset, &len);
-
- if (len != UNIV_SQL_NULL) {
- size += len;
- } else {
- dtype_new_read_for_order_and_null_size(&dtype, types);
-
- size += dtype_get_sql_null_size(&dtype, comp);
- }
-
- types += types_offset;
- }
-
- return(size);
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_rec_get_volume(mtr,rec) ibuf_rec_get_volume_func(mtr,rec)
-#else /* UNIV_DEBUG */
-# define ibuf_rec_get_volume(mtr,rec) ibuf_rec_get_volume_func(rec)
-#endif
+ log_free_check();
-/********************************************************************//**
-Returns the space taken by a stored non-clustered index entry if converted to
-an index record.
-@return size of index record in bytes + an upper limit of the space
-taken in the page directory */
-static
-ulint
-ibuf_rec_get_volume_func(
-/*=====================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* ibuf_rec)/*!< in: ibuf record */
-{
- ulint len;
- const byte* data;
- const byte* types;
- ulint n_fields;
- ulint data_size;
- ulint comp;
- ibuf_op_t op;
- ulint info_len;
-
- ut_ad(mtr->memo_contains_page_flagged(ibuf_rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
- ut_ad(rec_get_n_fields_old(ibuf_rec) > 2);
-
- data = rec_get_nth_field_old(ibuf_rec, IBUF_REC_FIELD_MARKER, &len);
- ut_a(len == 1);
- ut_a(*data == 0);
-
- types = rec_get_nth_field_old(
- ibuf_rec, IBUF_REC_FIELD_METADATA, &len);
-
- ibuf_rec_get_info(mtr, ibuf_rec, &op, &comp, &info_len, NULL);
-
- if (op == IBUF_OP_DELETE_MARK || op == IBUF_OP_DELETE) {
- /* Delete-marking a record doesn't take any
- additional space, and while deleting a record
- actually frees up space, we have to play it safe and
- pretend it takes no additional space (the record
- might not exist, etc.). */
-
- return(0);
- } else if (comp) {
- dtuple_t* entry;
- ulint volume;
- dict_index_t* dummy_index;
- mem_heap_t* heap = mem_heap_create(500);
-
- entry = ibuf_build_entry_from_ibuf_rec(mtr, ibuf_rec,
- heap, &dummy_index);
-
- volume = rec_get_converted_size(dummy_index, entry, 0);
-
- ibuf_dummy_index_free(dummy_index);
- mem_heap_free(heap);
-
- return(volume + page_dir_calc_reserved_space(1));
- }
-
- types += info_len;
- n_fields = rec_get_n_fields_old(ibuf_rec)
- - IBUF_REC_FIELD_USER;
-
- data_size = ibuf_rec_get_size(ibuf_rec, types, n_fields, comp);
-
- return(data_size + rec_get_converted_extra_size(data_size, n_fields, 0)
- + page_dir_calc_reserved_space(1));
-}
-
-/*********************************************************************//**
-Builds the tuple to insert to an ibuf tree when we have an entry for a
-non-clustered index.
-
-NOTE that the original entry must be kept because we copy pointers to
-its fields.
-
-@return own: entry to insert into an ibuf index tree */
-static
-dtuple_t*
-ibuf_entry_build(
-/*=============*/
- ibuf_op_t op, /*!< in: operation type */
- dict_index_t* index, /*!< in: non-clustered index */
- const dtuple_t* entry, /*!< in: entry for a non-clustered index */
- ulint space, /*!< in: space id */
- ulint page_no,/*!< in: index page number where entry should
- be inserted */
- ulint counter,/*!< in: counter value;
- ULINT_UNDEFINED=not used */
- mem_heap_t* heap) /*!< in: heap into which to build */
-{
- dtuple_t* tuple;
- dfield_t* field;
- const dfield_t* entry_field;
- ulint n_fields;
- byte* buf;
- byte* ti;
- byte* type_info;
- ulint i;
-
- ut_ad(counter != ULINT_UNDEFINED || op == IBUF_OP_INSERT);
- ut_ad(counter == ULINT_UNDEFINED || counter <= 0xFFFF);
- ut_ad(op < IBUF_OP_COUNT);
-
- /* We have to build a tuple with the following fields:
-
- 1-4) These are described at the top of this file.
-
- 5) The rest of the fields are copied from the entry.
-
- All fields in the tuple are ordered like the type binary in our
- insert buffer tree. */
-
- n_fields = dtuple_get_n_fields(entry);
-
- tuple = dtuple_create(heap, n_fields + IBUF_REC_FIELD_USER);
-
- /* 1) Space Id */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_SPACE);
-
- buf = static_cast<byte*>(mem_heap_alloc(heap, 4));
-
- mach_write_to_4(buf, space);
-
- dfield_set_data(field, buf, 4);
-
- /* 2) Marker byte */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_MARKER);
-
- buf = static_cast<byte*>(mem_heap_alloc(heap, 1));
-
- /* We set the marker byte zero */
-
- mach_write_to_1(buf, 0);
-
- dfield_set_data(field, buf, 1);
-
- /* 3) Page number */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_PAGE);
-
- buf = static_cast<byte*>(mem_heap_alloc(heap, 4));
-
- mach_write_to_4(buf, page_no);
-
- dfield_set_data(field, buf, 4);
-
- /* 4) Type info, part #1 */
-
- if (counter == ULINT_UNDEFINED) {
- i = dict_table_is_comp(index->table) ? 1 : 0;
- } else {
- ut_ad(counter <= 0xFFFF);
- i = IBUF_REC_INFO_SIZE;
- }
-
- ti = type_info = static_cast<byte*>(
- mem_heap_alloc(
- heap,
- i + n_fields * DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE));
-
- switch (i) {
- default:
- ut_error;
- break;
- case 1:
- /* set the flag for ROW_FORMAT=COMPACT */
- *ti++ = 0;
- /* fall through */
- case 0:
- /* the old format does not allow delete buffering */
- ut_ad(op == IBUF_OP_INSERT);
- break;
- case IBUF_REC_INFO_SIZE:
- mach_write_to_2(ti + IBUF_REC_OFFSET_COUNTER, counter);
-
- ti[IBUF_REC_OFFSET_TYPE] = (byte) op;
- ti[IBUF_REC_OFFSET_FLAGS] = dict_table_is_comp(index->table)
- ? IBUF_REC_COMPACT : 0;
- ti += IBUF_REC_INFO_SIZE;
- break;
- }
-
- /* 5+) Fields from the entry */
-
- for (i = 0; i < n_fields; i++) {
- ulint fixed_len;
- const dict_field_t* ifield;
-
- field = dtuple_get_nth_field(tuple, i + IBUF_REC_FIELD_USER);
- entry_field = dtuple_get_nth_field(entry, i);
- dfield_copy(field, entry_field);
-
- ifield = dict_index_get_nth_field(index, i);
- ut_ad(!ifield->descending);
- /* Prefix index columns of fixed-length columns are of
- fixed length. However, in the function call below,
- dfield_get_type(entry_field) contains the fixed length
- of the column in the clustered index. Replace it with
- the fixed length of the secondary index column. */
- fixed_len = ifield->fixed_len;
-
-#ifdef UNIV_DEBUG
- if (fixed_len) {
- /* dict_index_add_col() should guarantee these */
- ut_ad(fixed_len <= (ulint)
- dfield_get_type(entry_field)->len);
- if (ifield->prefix_len) {
- ut_ad(ifield->prefix_len == fixed_len);
- } else {
- ut_ad(fixed_len == (ulint)
- dfield_get_type(entry_field)->len);
- }
- }
-#endif /* UNIV_DEBUG */
-
- dtype_new_store_for_order_and_null_size(
- ti, dfield_get_type(entry_field), fixed_len);
- ti += DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE;
- }
-
- /* 4) Type info, part #2 */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_METADATA);
-
- dfield_set_data(field, type_info, ulint(ti - type_info));
-
- /* Set all the types in the new tuple binary */
-
- dtuple_set_types_binary(tuple, n_fields + IBUF_REC_FIELD_USER);
-
- return(tuple);
-}
-
-/*********************************************************************//**
-Builds a search tuple used to search buffered inserts for an index page.
-This is for >= 4.1.x format records.
-@return own: search tuple */
-static
-dtuple_t*
-ibuf_search_tuple_build(
-/*====================*/
- ulint space, /*!< in: space id */
- ulint page_no,/*!< in: index page number */
- mem_heap_t* heap) /*!< in: heap into which to build */
-{
- dtuple_t* tuple;
- dfield_t* field;
- byte* buf;
-
- tuple = dtuple_create(heap, IBUF_REC_FIELD_METADATA);
-
- /* Store the space id in tuple */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_SPACE);
-
- buf = static_cast<byte*>(mem_heap_alloc(heap, 4));
-
- mach_write_to_4(buf, space);
-
- dfield_set_data(field, buf, 4);
-
- /* Store the new format record marker byte */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_MARKER);
-
- buf = static_cast<byte*>(mem_heap_alloc(heap, 1));
-
- mach_write_to_1(buf, 0);
-
- dfield_set_data(field, buf, 1);
-
- /* Store the page number in tuple */
-
- field = dtuple_get_nth_field(tuple, IBUF_REC_FIELD_PAGE);
-
- buf = static_cast<byte*>(mem_heap_alloc(heap, 4));
-
- mach_write_to_4(buf, page_no);
-
- dfield_set_data(field, buf, 4);
-
- dtuple_set_types_binary(tuple, IBUF_REC_FIELD_METADATA);
-
- return(tuple);
-}
-
-/*********************************************************************//**
-Checks if there are enough pages in the free list of the ibuf tree that we
-dare to start a pessimistic insert to the insert buffer.
-@return whether enough free pages in list */
-static inline bool ibuf_data_enough_free_for_insert()
-{
- mysql_mutex_assert_owner(&ibuf_mutex);
-
- /* We want a big margin of free pages, because a B-tree can sometimes
- grow in size also if records are deleted from it, as the node pointers
- can change, and we must make sure that we are able to delete the
- inserts buffered for pages that we read to the buffer pool, without
- any risk of running out of free space in the insert buffer. */
-
- return(ibuf.free_list_len >= (ibuf.size / 2) + 3 * ibuf.height);
-}
-
-/*********************************************************************//**
-Checks if there are enough pages in the free list of the ibuf tree that we
-should remove them and free to the file space management.
-@return TRUE if enough free pages in list */
-UNIV_INLINE
-ibool
-ibuf_data_too_much_free(void)
-/*=========================*/
-{
- mysql_mutex_assert_owner(&ibuf_mutex);
-
- return(ibuf.free_list_len >= 3 + (ibuf.size / 2) + 3 * ibuf.height);
-}
-
-/** Allocate a change buffer page.
-@retval true on success
-@retval false if no space left */
-static bool ibuf_add_free_page()
-{
- mtr_t mtr;
- page_t* header_page;
- buf_block_t* block;
-
- mtr.start();
- /* Acquire the fsp latch before the ibuf header, obeying the latching
- order */
- mtr.x_lock_space(fil_system.sys_space);
- header_page = ibuf_header_page_get(&mtr);
- if (!header_page) {
- mtr.commit();
- return false;
- }
-
- /* Allocate a new page: NOTE that if the page has been a part of a
- non-clustered index which has subsequently been dropped, then the
- page may have buffered inserts in the insert buffer, and these
- should be deleted from there. These get deleted when the page
- allocation creates the page in buffer. Thus the call below may end
- up calling the insert buffer routines and, as we yet have no latches
- to insert buffer tree pages, these routines can run without a risk
- of a deadlock. This is the reason why we created a special ibuf
- header page apart from the ibuf tree. */
-
- dberr_t err;
- block = fseg_alloc_free_page_general(
- header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER, 0, FSP_UP,
- false, &mtr, &mtr, &err);
-
- if (!block) {
- mtr.commit();
- return false;
- }
-
- ut_ad(block->page.lock.not_recursive());
- ibuf_enter(&mtr);
- mysql_mutex_lock(&ibuf_mutex);
-
- mtr.write<2>(*block, block->page.frame + FIL_PAGE_TYPE,
- FIL_PAGE_IBUF_FREE_LIST);
- buf_block_t* ibuf_root = ibuf_tree_root_get(&mtr);
- if (UNIV_UNLIKELY(!ibuf_root)) {
-corrupted:
- /* Do not bother to try to free the allocated block, because
- the change buffer is seriously corrupted already. */
- mysql_mutex_unlock(&ibuf_mutex);
- ibuf_mtr_commit(&mtr);
- return false;
- }
-
- /* Add the page to the free list and update the ibuf size data */
-
- err = flst_add_last(ibuf_root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
- &mtr);
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- goto corrupted;
- }
-
- /* Set the bit indicating that this page is now an ibuf tree page
- (level 2 page) */
-
- const page_id_t page_id(block->page.id());
- buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
-
- if (UNIV_UNLIKELY(!bitmap_page)) {
- goto corrupted;
- }
-
- ibuf.seg_size++;
- ibuf.free_list_len++;
-
- mysql_mutex_unlock(&ibuf_mutex);
-
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_IBUF>(bitmap_page, page_id,
- srv_page_size, true, &mtr);
- ibuf_mtr_commit(&mtr);
- return true;
-}
-
-/*********************************************************************//**
-Removes a page from the free list and frees it to the fsp system. */
-static void ibuf_remove_free_page()
-{
- mtr_t mtr;
- mtr_t mtr2;
- page_t* header_page;
-
- log_free_check();
-
- mtr_start(&mtr);
- /* Acquire the fsp latch before the ibuf header, obeying the latching
- order */
-
- mtr.x_lock_space(fil_system.sys_space);
- header_page = ibuf_header_page_get(&mtr);
-
- /* Prevent pessimistic inserts to insert buffer trees for a while */
- ibuf_enter(&mtr);
- mysql_mutex_lock(&ibuf_pessimistic_insert_mutex);
- mysql_mutex_lock(&ibuf_mutex);
-
- if (!header_page || !ibuf_data_too_much_free()) {
-early_exit:
- mysql_mutex_unlock(&ibuf_mutex);
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
-
- ibuf_mtr_commit(&mtr);
-
- return;
- }
-
- ibuf_mtr_start(&mtr2);
-
- buf_block_t* root = ibuf_tree_root_get(&mtr2);
-
- if (UNIV_UNLIKELY(!root)) {
- ibuf_mtr_commit(&mtr2);
- goto early_exit;
- }
-
- mysql_mutex_unlock(&ibuf_mutex);
-
- const uint32_t page_no = flst_get_last(PAGE_HEADER
- + PAGE_BTR_IBUF_FREE_LIST
- + root->page.frame).page;
-
- /* NOTE that we must release the latch on the ibuf tree root
- because in fseg_free_page we access level 1 pages, and the root
- is a level 2 page. */
-
- ibuf_mtr_commit(&mtr2);
- ibuf_exit(&mtr);
-
- /* Since pessimistic inserts were prevented, we know that the
- page is still in the free list. NOTE that also deletes may take
- pages from the free list, but they take them from the start, and
- the free list was so long that they cannot have taken the last
- page from it. */
-
- compile_time_assert(IBUF_SPACE_ID == 0);
- const page_id_t page_id{IBUF_SPACE_ID, page_no};
- buf_block_t* bitmap_page = nullptr;
- dberr_t err = fseg_free_page(
- header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
- fil_system.sys_space, page_no, &mtr);
-
- if (err != DB_SUCCESS) {
- goto func_exit;
- }
-
- ibuf_enter(&mtr);
-
- mysql_mutex_lock(&ibuf_mutex);
-
- root = ibuf_tree_root_get(&mtr, &err);
- if (UNIV_UNLIKELY(!root)) {
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
- goto func_exit;
- }
-
- ut_ad(page_no == flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST
- + root->page.frame).page);
-
- /* Remove the page from the free list and update the ibuf size data */
- if (buf_block_t* block =
- buf_page_get_gen(page_id, 0, RW_X_LATCH, nullptr, BUF_GET,
- &mtr, &err)) {
- err = flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- block,
- PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
- &mtr);
- }
-
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
+ mtr.start();
- if (err == DB_SUCCESS) {
- ibuf.seg_size--;
- ibuf.free_list_len--;
- bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
- }
+ mtr.x_lock_space(fil_system.sys_space);
+ dberr_t err;
+ buf_block_t* header= buf_page_get_gen(ibuf_header, 0, RW_X_LATCH, nullptr,
+ BUF_GET, &mtr, &err);
+ if (!header)
+ {
func_exit:
- mysql_mutex_unlock(&ibuf_mutex);
-
- if (bitmap_page) {
- /* Set the bit indicating that this page is no more an
- ibuf tree page (level 2 page) */
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_IBUF>(
- bitmap_page, page_id, srv_page_size, false, &mtr);
- }
-
- if (err == DB_SUCCESS) {
- buf_page_free(fil_system.sys_space, page_no, &mtr);
- }
-
- ibuf_mtr_commit(&mtr);
-}
-
-/***********************************************************************//**
-Frees excess pages from the ibuf free list. This function is called when an OS
-thread calls fsp services to allocate a new file segment, or a new page to a
-file segment, and the thread did not own the fsp latch before this call. */
-void
-ibuf_free_excess_pages(void)
-/*========================*/
-{
- if (UNIV_UNLIKELY(!ibuf.index)) return;
- /* Free at most a few pages at a time, so that we do not delay the
- requested service too much */
-
- for (ulint i = 0; i < 4; i++) {
-
- ibool too_much_free;
-
- mysql_mutex_lock(&ibuf_mutex);
- too_much_free = ibuf_data_too_much_free();
- mysql_mutex_unlock(&ibuf_mutex);
-
- if (!too_much_free) {
- return;
- }
-
- ibuf_remove_free_page();
- }
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_get_merge_page_nos(contract,rec,mtr,ids,pages,n_stored) \
- ibuf_get_merge_page_nos_func(contract,rec,mtr,ids,pages,n_stored)
-#else /* UNIV_DEBUG */
-# define ibuf_get_merge_page_nos(contract,rec,mtr,ids,pages,n_stored) \
- ibuf_get_merge_page_nos_func(contract,rec,ids,pages,n_stored)
-#endif /* UNIV_DEBUG */
-
-/*********************************************************************//**
-Reads page numbers from a leaf in an ibuf tree.
-@return a lower limit for the combined volume of records which will be
-merged */
-static
-ulint
-ibuf_get_merge_page_nos_func(
-/*=========================*/
- ibool contract,/*!< in: TRUE if this function is called to
- contract the tree, FALSE if this is called
- when a single page becomes full and we look
- if it pays to read also nearby pages */
- const rec_t* rec, /*!< in: insert buffer record */
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction holding rec */
-#endif /* UNIV_DEBUG */
- uint32_t* space_ids,/*!< in/out: space id's of the pages */
- uint32_t* page_nos,/*!< in/out: buffer for at least
- IBUF_MAX_N_PAGES_MERGED many page numbers;
- the page numbers are in an ascending order */
- ulint* n_stored)/*!< out: number of page numbers stored to
- page_nos in this function */
-{
- uint32_t prev_page_no;
- uint32_t prev_space_id;
- uint32_t first_page_no;
- uint32_t first_space_id;
- uint32_t rec_page_no;
- uint32_t rec_space_id;
- ulint sum_volumes;
- ulint volume_for_page;
- ulint rec_volume;
- ulint limit;
- ulint n_pages;
-
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
-
- *n_stored = 0;
-
- if (page_rec_is_supremum(rec)) {
-
- rec = page_rec_get_prev_const(rec);
- if (UNIV_UNLIKELY(!rec)) {
-corruption:
- ut_ad("corrupted page" == 0);
- return 0;
- }
- }
-
- if (page_rec_is_infimum(rec)) {
- rec = page_rec_get_next_const(rec);
- if (!rec || page_rec_is_supremum(rec)) {
- return 0;
- }
- }
-
- limit = ut_min(IBUF_MAX_N_PAGES_MERGED,
- buf_pool_get_curr_size() / 4);
-
- first_page_no = ibuf_rec_get_page_no(mtr, rec);
- first_space_id = ibuf_rec_get_space(mtr, rec);
- n_pages = 0;
- prev_page_no = 0;
- prev_space_id = 0;
-
- /* Go backwards from the first rec until we reach the border of the
- 'merge area', or the page start or the limit of storeable pages is
- reached */
-
- while (!page_rec_is_infimum(rec) && UNIV_LIKELY(n_pages < limit)) {
-
- rec_page_no = ibuf_rec_get_page_no(mtr, rec);
- rec_space_id = ibuf_rec_get_space(mtr, rec);
-
- if (rec_space_id != first_space_id
- || (rec_page_no / IBUF_MERGE_AREA)
- != (first_page_no / IBUF_MERGE_AREA)) {
-
- break;
- }
-
- if (rec_page_no != prev_page_no
- || rec_space_id != prev_space_id) {
- n_pages++;
- }
-
- prev_page_no = rec_page_no;
- prev_space_id = rec_space_id;
-
- if (UNIV_UNLIKELY(!(rec = page_rec_get_prev_const(rec)))) {
- goto corruption;
- }
- }
-
- rec = page_rec_get_next_const(rec);
-
- /* At the loop start there is no prev page; we mark this with a pair
- of space id, page no (0, 0) for which there can never be entries in
- the insert buffer */
-
- prev_page_no = 0;
- prev_space_id = 0;
- sum_volumes = 0;
- volume_for_page = 0;
-
- while (*n_stored < limit && rec) {
- if (page_rec_is_supremum(rec)) {
- /* When no more records available, mark this with
- another 'impossible' pair of space id, page no */
- rec_page_no = 1;
- rec_space_id = 0;
- } else {
- rec_page_no = ibuf_rec_get_page_no(mtr, rec);
- rec_space_id = ibuf_rec_get_space(mtr, rec);
- /* In the system tablespace the smallest
- possible secondary index leaf page number is
- bigger than FSP_DICT_HDR_PAGE_NO (7).
- In all tablespaces, pages 0 and 1 are reserved
- for the allocation bitmap and the change
- buffer bitmap. In file-per-table tablespaces,
- a file segment inode page will be created at
- page 2 and the clustered index tree is created
- at page 3. So for file-per-table tablespaces,
- page 4 is the smallest possible secondary
- index leaf page. CREATE TABLESPACE also initially
- uses pages 2 and 3 for the first created table,
- but that table may be dropped, allowing page 2
- to be reused for a secondary index leaf page.
- To keep this assertion simple, just
- make sure the page is >= 2. */
- ut_ad(rec_page_no >= FSP_FIRST_INODE_PAGE_NO);
- }
-
-#ifdef UNIV_IBUF_DEBUG
- ut_a(*n_stored < IBUF_MAX_N_PAGES_MERGED);
-#endif
- if ((rec_space_id != prev_space_id
- || rec_page_no != prev_page_no)
- && (prev_space_id != 0 || prev_page_no != 0)) {
-
- if (contract
- || (prev_page_no == first_page_no
- && prev_space_id == first_space_id)
- || (volume_for_page
- > ((IBUF_MERGE_THRESHOLD - 1)
- * 4U << srv_page_size_shift
- / IBUF_PAGE_SIZE_PER_FREE_SPACE)
- / IBUF_MERGE_THRESHOLD)) {
-
- space_ids[*n_stored] = prev_space_id;
- page_nos[*n_stored] = prev_page_no;
-
- (*n_stored)++;
-
- sum_volumes += volume_for_page;
- }
-
- if (rec_space_id != first_space_id
- || rec_page_no / IBUF_MERGE_AREA
- != first_page_no / IBUF_MERGE_AREA) {
-
- break;
- }
-
- volume_for_page = 0;
- }
-
- if (rec_page_no == 1 && rec_space_id == 0) {
- /* Supremum record */
-
- break;
- }
-
- rec_volume = ibuf_rec_get_volume(mtr, rec);
-
- volume_for_page += rec_volume;
-
- prev_page_no = rec_page_no;
- prev_space_id = rec_space_id;
-
- rec = page_rec_get_next_const(rec);
- }
-
-#ifdef UNIV_IBUF_DEBUG
- ut_a(*n_stored <= IBUF_MAX_N_PAGES_MERGED);
-#endif
-#if 0
- fprintf(stderr, "Ibuf merge batch %lu pages %lu volume\n",
- *n_stored, sum_volumes);
-#endif
- return(sum_volumes);
-}
-
-/*******************************************************************//**
-Get the matching records for space id.
-@return current rec or NULL */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-const rec_t*
-ibuf_get_user_rec(
-/*===============*/
- btr_pcur_t* pcur, /*!< in: the current cursor */
- mtr_t* mtr) /*!< in: mini transaction */
-{
- do {
- const rec_t* rec = btr_pcur_get_rec(pcur);
-
- if (page_rec_is_user_rec(rec)) {
- return(rec);
- }
- } while (btr_pcur_move_to_next(pcur, mtr));
-
- return(NULL);
-}
-
-/*********************************************************************//**
-Reads page numbers for a space id from an ibuf tree.
-@return a lower limit for the combined volume of records which will be
-merged */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-ulint
-ibuf_get_merge_pages(
-/*=================*/
- btr_pcur_t* pcur, /*!< in/out: cursor */
- uint32_t space, /*!< in: space for which to merge */
- ulint limit, /*!< in: max page numbers to read */
- uint32_t* pages, /*!< out: pages read */
- uint32_t* spaces, /*!< out: spaces read */
- ulint* n_pages,/*!< out: number of pages read */
- mtr_t* mtr) /*!< in: mini transaction */
-{
- const rec_t* rec;
- ulint volume = 0;
-
- *n_pages = 0;
-
- while ((rec = ibuf_get_user_rec(pcur, mtr)) != 0
- && ibuf_rec_get_space(mtr, rec) == space
- && *n_pages < limit) {
-
- uint32_t page_no = ibuf_rec_get_page_no(mtr, rec);
-
- if (*n_pages == 0 || pages[*n_pages - 1] != page_no) {
- spaces[*n_pages] = space;
- pages[*n_pages] = page_no;
- ++*n_pages;
- }
-
- volume += ibuf_rec_get_volume(mtr, rec);
-
- btr_pcur_move_to_next(pcur, mtr);
- }
-
- return(volume);
-}
-
-/**
-Delete a change buffer record.
-@param[in] page_id page identifier
-@param[in,out] pcur persistent cursor positioned on the record
-@param[in] search_tuple search key for (space,page_no)
-@param[in,out] mtr mini-transaction
-@return whether mtr was committed (due to pessimistic operation) */
-static MY_ATTRIBUTE((warn_unused_result, nonnull))
-bool ibuf_delete_rec(const page_id_t page_id, btr_pcur_t* pcur,
- const dtuple_t* search_tuple, mtr_t* mtr);
-
-/** Delete the change buffer records for the given page id
-@param page_id page identifier */
-static void ibuf_delete_recs(const page_id_t page_id)
-{
- if (!ibuf.index || srv_read_only_mode)
- return;
- dfield_t dfield[IBUF_REC_FIELD_METADATA];
- dtuple_t tuple {0,IBUF_REC_FIELD_METADATA,IBUF_REC_FIELD_METADATA,
- dfield,0,nullptr
-#ifdef UNIV_DEBUG
- ,DATA_TUPLE_MAGIC_N
-#endif
- };
- byte space_id[4], page_no[4];
-
- mach_write_to_4(space_id, page_id.space());
- mach_write_to_4(page_no, page_id.page_no());
+ mtr.commit();
+ return err;
+ }
- dfield_set_data(&dfield[0], space_id, 4);
- dfield_set_data(&dfield[1], field_ref_zero, 1);
- dfield_set_data(&dfield[2], page_no, 4);
- dtuple_set_types_binary(&tuple, IBUF_REC_FIELD_METADATA);
+ buf_block_t *root= buf_page_get_gen(ibuf_root, 0, RW_X_LATCH,
+ nullptr, BUF_GET, &mtr, &err);
- mtr_t mtr;
-loop:
- btr_pcur_t pcur;
- pcur.btr_cur.page_cur.index= ibuf.index;
- ibuf_mtr_start(&mtr);
- if (btr_pcur_open(&tuple, PAGE_CUR_GE, BTR_MODIFY_LEAF, &pcur, &mtr))
+ if (UNIV_UNLIKELY(!root))
goto func_exit;
- if (!btr_pcur_is_on_user_rec(&pcur))
- {
- ut_ad(btr_pcur_is_after_last_on_page(&pcur));
- goto func_exit;
- }
- for (;;)
+ const uint32_t page_no= flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST +
+ root->page.frame).page;
+ if (page_no == FIL_NULL)
{
- ut_ad(btr_pcur_is_on_user_rec(&pcur));
- const rec_t* ibuf_rec = btr_pcur_get_rec(&pcur);
- if (ibuf_rec_get_space(&mtr, ibuf_rec) != page_id.space()
- || ibuf_rec_get_page_no(&mtr, ibuf_rec) != page_id.page_no())
- break;
- /* Delete the record from ibuf */
- if (ibuf_delete_rec(page_id, &pcur, &tuple, &mtr))
- {
- /* Deletion was pessimistic and mtr was committed:
- we start from the beginning again */
- ut_ad(mtr.has_committed());
- goto loop;
- }
-
- if (btr_pcur_is_after_last_on_page(&pcur))
- {
- ibuf_mtr_commit(&mtr);
- btr_pcur_close(&pcur);
- goto loop;
- }
+ mtr.set_modified(*root);
+ fsp_init_file_page(fil_system.sys_space, root, &mtr);
+ err= DB_SUCCESS_LOCKED_REC;
+ goto func_exit;
}
-func_exit:
- ibuf_mtr_commit(&mtr);
- btr_pcur_close(&pcur);
-}
-
-/** Merge the change buffer to some pages. */
-static void ibuf_read_merge_pages(const uint32_t* space_ids,
- const uint32_t* page_nos, ulint n_stored)
-{
- for (ulint i = 0; i < n_stored; i++) {
- const uint32_t space_id = space_ids[i];
- fil_space_t* s = fil_space_t::get(space_id);
- if (!s) {
-tablespace_deleted:
- /* The tablespace was not found: remove all
- entries for it */
- ibuf_delete_for_discarded_space(space_id);
- while (i + 1 < n_stored
- && space_ids[i + 1] == space_id) {
- i++;
- }
- continue;
- }
-
- const ulint zip_size = s->zip_size(), size = s->size;
- s->release();
- mtr_t mtr;
-
- if (UNIV_LIKELY(page_nos[i] < size)) {
- mtr.start();
- dberr_t err;
- buf_block_t *block =
- buf_page_get_gen(page_id_t(space_id, page_nos[i]),
- zip_size, RW_X_LATCH, nullptr,
- BUF_GET_POSSIBLY_FREED,
- &mtr, &err, true);
- bool remove = !block
- || fil_page_get_type(block->page.frame)
- != FIL_PAGE_INDEX
- || !page_is_leaf(block->page.frame);
- mtr.commit();
- if (err == DB_TABLESPACE_DELETED) {
- goto tablespace_deleted;
- }
- if (!remove) {
- continue;
- }
- }
-
- if (srv_shutdown_state == SRV_SHUTDOWN_NONE
- || srv_fast_shutdown) {
- continue;
- }
-
- /* The following code works around a hang when the
- change buffer is corrupted, likely due to the
- failure of ibuf_merge_or_delete_for_page() to
- invoke ibuf_delete_recs() if (!bitmap_bits).
-
- It also introduced corruption by itself in the
- following scenario:
-
- (1) We merged buffered changes in buf_page_get_gen()
- (2) We committed the mini-transaction
- (3) Redo log and the page with the merged changes is written
- (4) A write completion callback thread evicts the page.
- (5) Other threads buffer changes for that page.
- (6) We will wrongly discard those newly buffered changes below.
-
- To prevent this scenario, we will only invoke this code
- on shutdown. A call to ibuf_max_size_update(0) will cause
- ibuf_insert_low() to refuse to insert anything into the
- change buffer. */
-
- /* Prevent an infinite loop, by removing entries from
- the change buffer in the case the bitmap bits were
- wrongly clear even though buffered changes exist. */
- ibuf_delete_recs(page_id_t(space_ids[i], page_nos[i]));
- }
-}
-
-/** Contract the change buffer by reading pages to the buffer pool.
-@return a lower limit for the combined size in bytes of entries which
-will be merged from ibuf trees to the pages read
-@retval 0 if ibuf.empty */
-ulint ibuf_contract()
-{
- if (UNIV_UNLIKELY(!ibuf.index)) return 0;
- mtr_t mtr;
- btr_cur_t cur;
- ulint sum_sizes;
- uint32_t page_nos[IBUF_MAX_N_PAGES_MERGED];
- uint32_t space_ids[IBUF_MAX_N_PAGES_MERGED];
-
- ibuf_mtr_start(&mtr);
-
- if (cur.open_leaf(true, ibuf.index, BTR_SEARCH_LEAF, &mtr) !=
- DB_SUCCESS) {
- return 0;
- }
-
- ut_ad(page_validate(btr_cur_get_page(&cur), ibuf.index));
-
- if (page_is_empty(btr_cur_get_page(&cur))) {
- /* If a B-tree page is empty, it must be the root page
- and the whole B-tree must be empty. InnoDB does not
- allow empty B-tree pages other than the root. */
- ut_ad(ibuf.empty);
- ut_ad(btr_cur_get_block(&cur)->page.id()
- == page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO));
-
- ibuf_mtr_commit(&mtr);
-
- return(0);
- }
-
- ulint n_pages = 0;
- sum_sizes = ibuf_get_merge_page_nos(TRUE,
- btr_cur_get_rec(&cur), &mtr,
- space_ids,
- page_nos, &n_pages);
- ibuf_mtr_commit(&mtr);
-
- ibuf_read_merge_pages(space_ids, page_nos, n_pages);
-
- return(sum_sizes + 1);
-}
-
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages referring to space_id
-to the buffer pool.
-@returns number of pages merged.*/
-ulint
-ibuf_merge_space(
-/*=============*/
- ulint space) /*!< in: tablespace id to merge */
-{
- if (UNIV_UNLIKELY(!ibuf.index)) return 0;
- mtr_t mtr;
- btr_pcur_t pcur;
-
- dfield_t dfield[IBUF_REC_FIELD_METADATA];
- dtuple_t tuple {0, IBUF_REC_FIELD_METADATA,
- IBUF_REC_FIELD_METADATA,dfield,0,nullptr
-#ifdef UNIV_DEBUG
- , DATA_TUPLE_MAGIC_N
-#endif
- };
- byte space_id[4];
-
- mach_write_to_4(space_id, space);
-
- dfield_set_data(&dfield[0], space_id, 4);
- dfield_set_data(&dfield[1], field_ref_zero, 1);
- dfield_set_data(&dfield[2], field_ref_zero, 4);
-
- dtuple_set_types_binary(&tuple, IBUF_REC_FIELD_METADATA);
- ulint n_pages = 0;
-
- ut_ad(space < SRV_SPACE_ID_UPPER_BOUND);
-
- log_free_check();
- ibuf_mtr_start(&mtr);
-
- /* Position the cursor on the first matching record. */
-
- pcur.btr_cur.page_cur.index = ibuf.index;
- dberr_t err = btr_pcur_open(&tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF,
- &pcur, &mtr);
- ut_ad(err != DB_SUCCESS || page_validate(btr_pcur_get_page(&pcur),
- ibuf.index));
-
- ulint sum_sizes = 0;
- uint32_t pages[IBUF_MAX_N_PAGES_MERGED];
- uint32_t spaces[IBUF_MAX_N_PAGES_MERGED];
-
- if (err != DB_SUCCESS) {
- } else if (page_is_empty(btr_pcur_get_page(&pcur))) {
- /* If a B-tree page is empty, it must be the root page
- and the whole B-tree must be empty. InnoDB does not
- allow empty B-tree pages other than the root. */
- ut_ad(ibuf.empty);
- ut_ad(btr_pcur_get_block(&pcur)->page.id()
- == page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO));
- } else {
-
- sum_sizes = ibuf_get_merge_pages(
- &pcur, uint32_t(space), IBUF_MAX_N_PAGES_MERGED,
- &pages[0], &spaces[0], &n_pages,
- &mtr);
- ib::info() << "Size of pages merged " << sum_sizes;
- }
-
- ibuf_mtr_commit(&mtr);
-
- if (n_pages > 0) {
- ut_ad(n_pages <= UT_ARR_SIZE(pages));
-
-#ifdef UNIV_DEBUG
- for (ulint i = 0; i < n_pages; ++i) {
- ut_ad(spaces[i] == space);
- }
-#endif /* UNIV_DEBUG */
-
- ibuf_read_merge_pages(spaces, pages, n_pages);
- }
-
- return(n_pages);
-}
-
-/*********************************************************************//**
-Contract insert buffer trees after insert if they are too big. */
-UNIV_INLINE
-void
-ibuf_contract_after_insert(
-/*=======================*/
- ulint entry_size) /*!< in: size of a record which was inserted
- into an ibuf tree */
-{
- /* dirty comparison, to avoid contention on ibuf_mutex */
- if (ibuf.size < ibuf.max_size) {
- return;
- }
-
- /* Contract at least entry_size many bytes */
- ulint sum_sizes = 0;
- ulint size;
-
- do {
- size = ibuf_contract();
- sum_sizes += size;
- } while (size > 0 && sum_sizes < entry_size);
-}
-
-/** Determine if a change buffer record has been encountered already.
-@param rec change buffer record in the MySQL 5.5 format
-@param hash hash table of encountered records
-@param size number of elements in hash
-@retval true if a distinct record
-@retval false if this may be duplicating an earlier record */
-static bool ibuf_get_volume_buffered_hash(const rec_t *rec, ulint *hash,
- ulint size)
-{
- ut_ad(rec_get_n_fields_old(rec) > IBUF_REC_FIELD_USER);
- const ulint start= rec_get_field_start_offs(rec, IBUF_REC_FIELD_USER);
- const ulint len= rec_get_data_size_old(rec) - start;
- const uint32_t fold= my_crc32c(0, rec + start, len);
- hash+= (fold / (CHAR_BIT * sizeof *hash)) % size;
- ulint bitmask= static_cast<ulint>(1) << (fold % (CHAR_BIT * sizeof(*hash)));
-
- if (*hash & bitmask)
- return false;
-
- /* We have not seen this record yet. Remember it. */
- *hash|= bitmask;
- return true;
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs) \
- ibuf_get_volume_buffered_count_func(mtr,rec,hash,size,n_recs)
-#else /* UNIV_DEBUG */
-# define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs) \
- ibuf_get_volume_buffered_count_func(rec,hash,size,n_recs)
-#endif /* UNIV_DEBUG */
-
-/*********************************************************************//**
-Update the estimate of the number of records on a page, and
-get the space taken by merging the buffered record to the index page.
-@return size of index record in bytes + an upper limit of the space
-taken in the page directory */
-static
-ulint
-ibuf_get_volume_buffered_count_func(
-/*================================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec, /*!< in: insert buffer record */
- ulint* hash, /*!< in/out: hash array */
- ulint size, /*!< in: number of elements in hash array */
- lint* n_recs) /*!< in/out: estimated number of records
- on the page that rec points to */
-{
- ulint len;
- ibuf_op_t ibuf_op;
- const byte* types;
- ulint n_fields;
-
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(ibuf_inside(mtr));
-
- n_fields = rec_get_n_fields_old(rec);
- ut_ad(n_fields > IBUF_REC_FIELD_USER);
- n_fields -= IBUF_REC_FIELD_USER;
-
- rec_get_nth_field_offs_old(rec, 1, &len);
- /* This function is only invoked when buffering new
- operations. All pre-4.1 records should have been merged
- when the database was started up. */
- ut_a(len == 1);
-
- if (rec_get_deleted_flag(rec, 0)) {
- /* This record has been merged already,
- but apparently the system crashed before
- the change was discarded from the buffer.
- Pretend that the record does not exist. */
- return(0);
- }
-
- types = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len);
-
- switch (UNIV_EXPECT(int(len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE),
- IBUF_REC_INFO_SIZE)) {
- default:
- ut_error;
- case 0:
- /* This ROW_TYPE=REDUNDANT record does not include an
- operation counter. Exclude it from the *n_recs,
- because deletes cannot be buffered if there are
- old-style inserts buffered for the page. */
-
- len = ibuf_rec_get_size(rec, types, n_fields, 0);
-
- return(len
- + rec_get_converted_extra_size(len, n_fields, 0)
- + page_dir_calc_reserved_space(1));
- case 1:
- /* This ROW_TYPE=COMPACT record does not include an
- operation counter. Exclude it from the *n_recs,
- because deletes cannot be buffered if there are
- old-style inserts buffered for the page. */
- goto get_volume_comp;
-
- case IBUF_REC_INFO_SIZE:
- ibuf_op = (ibuf_op_t) types[IBUF_REC_OFFSET_TYPE];
- break;
- }
-
- switch (ibuf_op) {
- case IBUF_OP_INSERT:
- /* Inserts can be done by updating a delete-marked record.
- Because delete-mark and insert operations can be pointing to
- the same records, we must not count duplicates. */
- case IBUF_OP_DELETE_MARK:
- /* There must be a record to delete-mark.
- See if this record has been already buffered. */
- if (n_recs && ibuf_get_volume_buffered_hash(rec, hash, size)) {
- (*n_recs)++;
- }
-
- if (ibuf_op == IBUF_OP_DELETE_MARK) {
- /* Setting the delete-mark flag does not
- affect the available space on the page. */
- return(0);
- }
- break;
- case IBUF_OP_DELETE:
- /* A record will be removed from the page. */
- if (n_recs) {
- (*n_recs)--;
- }
- /* While deleting a record actually frees up space,
- we have to play it safe and pretend that it takes no
- additional space (the record might not exist, etc.). */
- return(0);
- default:
- ut_error;
- }
-
- ut_ad(ibuf_op == IBUF_OP_INSERT);
-
-get_volume_comp:
- {
- dtuple_t* entry;
- ulint volume;
- dict_index_t* dummy_index;
- mem_heap_t* heap = mem_heap_create(500);
-
- entry = ibuf_build_entry_from_ibuf_rec(
- mtr, rec, heap, &dummy_index);
- volume = rec_get_converted_size(dummy_index, entry, 0);
+ /* Since pessimistic inserts were prevented, we know that the
+ page is still in the free list. NOTE that also deletes may take
+ pages from the free list, but they take them from the start, and
+ the free list was so long that they cannot have taken the last
+ page from it. */
- ibuf_dummy_index_free(dummy_index);
- mem_heap_free(heap);
+ err= fseg_free_page(header->page.frame + PAGE_DATA, fil_system.sys_space,
+ page_no, &mtr);
- return(volume + page_dir_calc_reserved_space(1));
- }
-}
-
-/*********************************************************************//**
-Gets an upper limit for the combined size of entries buffered in the insert
-buffer for a given page.
-@return upper limit for the volume of buffered inserts for the index
-page, in bytes; srv_page_size, if the entries for the index page span
-several pages in the insert buffer */
-static
-ulint
-ibuf_get_volume_buffered(
-/*=====================*/
- const btr_pcur_t*pcur, /*!< in: pcur positioned at a place in an
- insert buffer tree where we would insert an
- entry for the index page whose number is
- page_no, latch mode has to be BTR_MODIFY_PREV
- or BTR_MODIFY_TREE */
- ulint space, /*!< in: space id */
- ulint page_no,/*!< in: page number of an index page */
- lint* n_recs, /*!< in/out: minimum number of records on the
- page after the buffered changes have been
- applied, or NULL to disable the counting */
- mtr_t* mtr) /*!< in: mini-transaction of pcur */
-{
- ulint volume;
- const rec_t* rec;
- const page_t* page;
- const page_t* prev_page;
- const page_t* next_page;
- /* bitmap of buffered recs */
- ulint hash_bitmap[128 / sizeof(ulint)];
-
- ut_ad((pcur->latch_mode == BTR_MODIFY_PREV)
- || (pcur->latch_mode == BTR_MODIFY_TREE));
-
- /* Count the volume of inserts earlier in the alphabetical order than
- pcur */
-
- volume = 0;
-
- if (n_recs) {
- memset(hash_bitmap, 0, sizeof hash_bitmap);
- }
-
- rec = btr_pcur_get_rec(pcur);
- page = page_align(rec);
- ut_ad(page_validate(page, ibuf.index));
-
- if (page_rec_is_supremum(rec)
- && UNIV_UNLIKELY(!(rec = page_rec_get_prev_const(rec)))) {
-corruption:
- ut_ad("corrupted page" == 0);
- return srv_page_size;
- }
-
- uint32_t prev_page_no;
-
- for (; !page_rec_is_infimum(rec); ) {
- ut_ad(page_align(rec) == page);
-
- if (page_no != ibuf_rec_get_page_no(mtr, rec)
- || space != ibuf_rec_get_space(mtr, rec)) {
-
- goto count_later;
- }
-
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
-
- if (UNIV_UNLIKELY(!(rec = page_rec_get_prev_const(rec)))) {
- goto corruption;
- }
- }
-
- /* Look at the previous page */
-
- prev_page_no = btr_page_get_prev(page);
-
- if (prev_page_no == FIL_NULL) {
-
- goto count_later;
- }
-
- if (buf_block_t* block =
- buf_page_get(page_id_t(IBUF_SPACE_ID, prev_page_no),
- 0, RW_X_LATCH, mtr)) {
- prev_page = buf_block_get_frame(block);
- ut_ad(page_validate(prev_page, ibuf.index));
- } else {
- return srv_page_size;
- }
-
- static_assert(FIL_PAGE_NEXT % 4 == 0, "alignment");
- static_assert(FIL_PAGE_OFFSET % 4 == 0, "alignment");
-
- if (UNIV_UNLIKELY(memcmp_aligned<4>(prev_page + FIL_PAGE_NEXT,
- page + FIL_PAGE_OFFSET, 4))) {
- return srv_page_size;
- }
-
- rec = page_rec_get_prev_const(page_get_supremum_rec(prev_page));
-
- if (UNIV_UNLIKELY(!rec)) {
- goto corruption;
- }
-
- for (;;) {
- ut_ad(page_align(rec) == prev_page);
-
- if (page_rec_is_infimum(rec)) {
-
- /* We cannot go to yet a previous page, because we
- do not have the x-latch on it, and cannot acquire one
- because of the latching order: we have to give up */
-
- return(srv_page_size);
- }
-
- if (page_no != ibuf_rec_get_page_no(mtr, rec)
- || space != ibuf_rec_get_space(mtr, rec)) {
-
- goto count_later;
- }
-
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
-
- if (UNIV_UNLIKELY(!(rec = page_rec_get_prev_const(rec)))) {
- goto corruption;
- }
- }
-
-count_later:
- rec = btr_pcur_get_rec(pcur);
-
- if (!page_rec_is_supremum(rec)) {
- rec = page_rec_get_next_const(rec);
- }
-
- for (; !page_rec_is_supremum(rec);
- rec = page_rec_get_next_const(rec)) {
- if (UNIV_UNLIKELY(!rec)) {
- return srv_page_size;
- }
- if (page_no != ibuf_rec_get_page_no(mtr, rec)
- || space != ibuf_rec_get_space(mtr, rec)) {
-
- return(volume);
- }
-
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
- }
-
- /* Look at the next page */
-
- uint32_t next_page_no = btr_page_get_next(page);
-
- if (next_page_no == FIL_NULL) {
-
- return(volume);
- }
-
- if (buf_block_t* block =
- buf_page_get(page_id_t(IBUF_SPACE_ID, next_page_no),
- 0, RW_X_LATCH, mtr)) {
- next_page = buf_block_get_frame(block);
- ut_ad(page_validate(next_page, ibuf.index));
- } else {
- return srv_page_size;
- }
-
- static_assert(FIL_PAGE_PREV % 4 == 0, "alignment");
- static_assert(FIL_PAGE_OFFSET % 4 == 0, "alignment");
-
- if (UNIV_UNLIKELY(memcmp_aligned<4>(next_page + FIL_PAGE_PREV,
- page + FIL_PAGE_OFFSET, 4))) {
- return 0;
- }
-
- rec = page_get_infimum_rec(next_page);
- rec = page_rec_get_next_const(rec);
-
- for (; ; rec = page_rec_get_next_const(rec)) {
- if (!rec || page_rec_is_supremum(rec)) {
- /* We give up */
- return(srv_page_size);
- }
-
- ut_ad(page_align(rec) == next_page);
-
- if (page_no != ibuf_rec_get_page_no(mtr, rec)
- || space != ibuf_rec_get_space(mtr, rec)) {
-
- return(volume);
- }
-
- volume += ibuf_get_volume_buffered_count(
- mtr, rec,
- hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
- }
-}
-
-/*********************************************************************//**
-Reads the biggest tablespace id from the high end of the insert buffer
-tree and updates the counter in fil_system. */
-void
-ibuf_update_max_tablespace_id(void)
-/*===============================*/
-{
- if (UNIV_UNLIKELY(!ibuf.index)) return;
- const rec_t* rec;
- const byte* field;
- ulint len;
- btr_pcur_t pcur;
- mtr_t mtr;
-
- ut_ad(!ibuf.index->table->not_redundant());
-
- ibuf_mtr_start(&mtr);
-
- if (pcur.open_leaf(false, ibuf.index, BTR_SEARCH_LEAF, &mtr)
- != DB_SUCCESS) {
-func_exit:
- ibuf_mtr_commit(&mtr);
- return;
- }
-
- ut_ad(page_validate(btr_pcur_get_page(&pcur), ibuf.index));
-
- if (!btr_pcur_move_to_prev(&pcur, &mtr)
- || btr_pcur_is_before_first_on_page(&pcur)) {
- goto func_exit;
- }
-
- rec = btr_pcur_get_rec(&pcur);
-
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_SPACE, &len);
-
- ut_a(len == 4);
-
- const uint32_t max_space_id = mach_read_from_4(field);
-
- ibuf_mtr_commit(&mtr);
-
- /* printf("Maximum space id in insert buffer %lu\n", max_space_id); */
-
- fil_set_max_space_id_if_bigger(max_space_id);
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_get_entry_counter_low(mtr,rec,space,page_no) \
- ibuf_get_entry_counter_low_func(mtr,rec,space,page_no)
-#else /* UNIV_DEBUG */
-# define ibuf_get_entry_counter_low(mtr,rec,space,page_no) \
- ibuf_get_entry_counter_low_func(rec,space,page_no)
-#endif
-/****************************************************************//**
-Helper function for ibuf_get_entry_counter_func. Checks if rec is for
-(space, page_no), and if so, reads counter value from it and returns
-that + 1.
-@retval ULINT_UNDEFINED if the record does not contain any counter
-@retval 0 if the record is not for (space, page_no)
-@retval 1 + previous counter value, otherwise */
-static
-ulint
-ibuf_get_entry_counter_low_func(
-/*============================*/
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction of rec */
-#endif /* UNIV_DEBUG */
- const rec_t* rec, /*!< in: insert buffer record */
- ulint space, /*!< in: space id */
- ulint page_no) /*!< in: page number */
-{
- ulint counter;
- const byte* field;
- ulint len;
-
- ut_ad(ibuf_inside(mtr));
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
- | MTR_MEMO_PAGE_S_FIX));
- ut_ad(rec_get_n_fields_old(rec) > 2);
-
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_MARKER, &len);
-
- ut_a(len == 1);
-
- /* Check the tablespace identifier. */
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_SPACE, &len);
-
- ut_a(len == 4);
-
- if (mach_read_from_4(field) != space) {
-
- return(0);
- }
-
- /* Check the page offset. */
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_PAGE, &len);
- ut_a(len == 4);
-
- if (mach_read_from_4(field) != page_no) {
-
- return(0);
- }
-
- /* Check if the record contains a counter field. */
- field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_METADATA, &len);
-
- switch (len % DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE) {
- default:
- ut_error;
- case 0: /* ROW_FORMAT=REDUNDANT */
- case 1: /* ROW_FORMAT=COMPACT */
- return(ULINT_UNDEFINED);
-
- case IBUF_REC_INFO_SIZE:
- counter = mach_read_from_2(field + IBUF_REC_OFFSET_COUNTER);
- ut_a(counter < 0xFFFF);
- return(counter + 1);
- }
-}
-
-#ifdef UNIV_DEBUG
-# define ibuf_get_entry_counter(space,page_no,rec,mtr,exact_leaf) \
- ibuf_get_entry_counter_func(space,page_no,rec,mtr,exact_leaf)
-#else /* UNIV_DEBUG */
-# define ibuf_get_entry_counter(space,page_no,rec,mtr,exact_leaf) \
- ibuf_get_entry_counter_func(space,page_no,rec,exact_leaf)
-#endif /* UNIV_DEBUG */
-
-/****************************************************************//**
-Calculate the counter field for an entry based on the current
-last record in ibuf for (space, page_no).
-@return the counter field, or ULINT_UNDEFINED
-if we should abort this insertion to ibuf */
-static
-ulint
-ibuf_get_entry_counter_func(
-/*========================*/
- ulint space, /*!< in: space id of entry */
- ulint page_no, /*!< in: page number of entry */
- const rec_t* rec, /*!< in: the record preceding the
- insertion point */
-#ifdef UNIV_DEBUG
- mtr_t* mtr, /*!< in: mini-transaction */
-#endif /* UNIV_DEBUG */
- ibool only_leaf) /*!< in: TRUE if this is the only
- leaf page that can contain entries
- for (space,page_no), that is, there
- was no exact match for (space,page_no)
- in the node pointer */
-{
- ut_ad(ibuf_inside(mtr));
- ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX));
- ut_ad(page_validate(page_align(rec), ibuf.index));
-
- if (page_rec_is_supremum(rec)) {
- /* This is just for safety. The record should be a
- page infimum or a user record. */
- ut_ad(0);
- return(ULINT_UNDEFINED);
- } else if (!page_rec_is_infimum(rec)) {
- return(ibuf_get_entry_counter_low(mtr, rec, space, page_no));
- } else if (only_leaf || !page_has_prev(page_align(rec))) {
- /* The parent node pointer did not contain the
- searched for (space, page_no), which means that the
- search ended on the correct page regardless of the
- counter value, and since we're at the infimum record,
- there are no existing records. */
- return(0);
- } else {
- /* We used to read the previous page here. It would
- break the latching order, because the caller has
- buffer-fixed an insert buffer bitmap page. */
- return(ULINT_UNDEFINED);
- }
-}
-
-
-/** Translates the ibuf free bits to the free space on a page in bytes.
-@param[in] physical_size page_size
-@param[in] bits value for ibuf bitmap bits
-@return maximum insert size after reorganize for the page */
-inline ulint
-ibuf_index_page_calc_free_from_bits(ulint physical_size, ulint bits)
-{
- ut_ad(bits < 4);
- ut_ad(physical_size > IBUF_PAGE_SIZE_PER_FREE_SPACE);
-
- if (bits == 3) {
- bits = 4;
- }
-
- return bits * physical_size / IBUF_PAGE_SIZE_PER_FREE_SPACE;
-}
-
-/** Buffer an operation in the insert/delete buffer, instead of doing it
-directly to the disk page, if this is possible.
-@param[in] mode BTR_MODIFY_PREV or BTR_INSERT_TREE
-@param[in] op operation type
-@param[in] no_counter TRUE=use 5.0.3 format; FALSE=allow delete
-buffering
-@param[in] entry index entry to insert
-@param[in] entry_size rec_get_converted_size(index, entry)
-@param[in,out] index index where to insert; must not be unique
-or clustered
-@param[in] page_id page id where to insert
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in,out] thr query thread
-@return DB_SUCCESS, DB_STRONG_FAIL or other error */
-static TRANSACTIONAL_TARGET MY_ATTRIBUTE((warn_unused_result))
-dberr_t
-ibuf_insert_low(
- btr_latch_mode mode,
- ibuf_op_t op,
- ibool no_counter,
- const dtuple_t* entry,
- ulint entry_size,
- dict_index_t* index,
- const page_id_t page_id,
- ulint zip_size,
- que_thr_t* thr)
-{
- big_rec_t* dummy_big_rec;
- btr_pcur_t pcur;
- btr_cur_t* cursor;
- dtuple_t* ibuf_entry;
- mem_heap_t* offsets_heap = NULL;
- mem_heap_t* heap;
- rec_offs* offsets = NULL;
- ulint buffered;
- lint min_n_recs;
- rec_t* ins_rec;
- buf_block_t* bitmap_page;
- buf_block_t* block = NULL;
- page_t* root;
- dberr_t err;
- ibool do_merge;
- uint32_t space_ids[IBUF_MAX_N_PAGES_MERGED];
- uint32_t page_nos[IBUF_MAX_N_PAGES_MERGED];
- ulint n_stored;
- mtr_t mtr;
- mtr_t bitmap_mtr;
-
- ut_a(!dict_index_is_clust(index));
- ut_ad(!dict_index_is_spatial(index));
- ut_ad(dtuple_check_typed(entry));
- ut_ad(!no_counter || op == IBUF_OP_INSERT);
- ut_ad(page_id.space() == index->table->space_id);
- ut_a(op < IBUF_OP_COUNT);
-
- do_merge = FALSE;
-
- /* Perform dirty comparison of ibuf.max_size and ibuf.size to
- reduce ibuf_mutex contention. This should be OK; at worst we
- are doing some excessive ibuf_contract() or occasionally
- skipping an ibuf_contract(). */
- const ulint max_size = ibuf.max_size;
-
- if (max_size == 0) {
- return(DB_STRONG_FAIL);
- }
-
- if (ibuf.size >= max_size + IBUF_CONTRACT_DO_NOT_INSERT) {
- /* Insert buffer is now too big, contract it but do not try
- to insert */
-
-
-#ifdef UNIV_IBUF_DEBUG
- fputs("Ibuf too big\n", stderr);
-#endif
- ibuf_contract();
-
- return(DB_STRONG_FAIL);
- }
-
- heap = mem_heap_create(1024);
-
- /* Build the entry which contains the space id and the page number
- as the first fields and the type information for other fields, and
- which will be inserted to the insert buffer. Using a counter value
- of 0xFFFF we find the last record for (space, page_no), from which
- we can then read the counter value N and use N + 1 in the record we
- insert. (We patch the ibuf_entry's counter field to the correct
- value just before actually inserting the entry.) */
-
- ibuf_entry = ibuf_entry_build(
- op, index, entry, page_id.space(), page_id.page_no(),
- no_counter ? ULINT_UNDEFINED : 0xFFFF, heap);
-
- /* Open a cursor to the insert buffer tree to calculate if we can add
- the new entry to it without exceeding the free space limit for the
- page. */
-
- if (mode == BTR_INSERT_TREE) {
- for (;;) {
- mysql_mutex_lock(&ibuf_pessimistic_insert_mutex);
- mysql_mutex_lock(&ibuf_mutex);
-
- if (UNIV_LIKELY(ibuf_data_enough_free_for_insert())) {
-
- break;
- }
-
- mysql_mutex_unlock(&ibuf_mutex);
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
-
- if (!ibuf_add_free_page()) {
-
- mem_heap_free(heap);
- return(DB_STRONG_FAIL);
- }
- }
- }
-
- ibuf_mtr_start(&mtr);
- pcur.btr_cur.page_cur.index = ibuf.index;
-
- err = btr_pcur_open(ibuf_entry, PAGE_CUR_LE, mode, &pcur, &mtr);
- if (err != DB_SUCCESS) {
-func_exit:
- ibuf_mtr_commit(&mtr);
- ut_free(pcur.old_rec_buf);
- mem_heap_free(heap);
-
- if (err == DB_SUCCESS && mode == BTR_INSERT_TREE) {
- ibuf_contract_after_insert(entry_size);
- }
-
- if (do_merge) {
-#ifdef UNIV_IBUF_DEBUG
- ut_a(n_stored <= IBUF_MAX_N_PAGES_MERGED);
-#endif
- ibuf_read_merge_pages(space_ids, page_nos, n_stored);
- }
- return err;
- }
-
- ut_ad(page_validate(btr_pcur_get_page(&pcur), ibuf.index));
-
- /* Find out the volume of already buffered inserts for the same index
- page */
- min_n_recs = 0;
- buffered = ibuf_get_volume_buffered(&pcur,
- page_id.space(),
- page_id.page_no(),
- op == IBUF_OP_DELETE
- ? &min_n_recs
- : NULL, &mtr);
-
- const ulint physical_size = zip_size ? zip_size : srv_page_size;
-
- if (op == IBUF_OP_DELETE
- && (min_n_recs < 2 || buf_pool.watch_occurred(page_id))) {
- /* The page could become empty after the record is
- deleted, or the page has been read in to the buffer
- pool. Refuse to buffer the operation. */
-
- /* The buffer pool watch is needed for IBUF_OP_DELETE
- because of latching order considerations. We can
- check buf_pool_watch_occurred() only after latching
- the insert buffer B-tree pages that contain buffered
- changes for the page. We never buffer IBUF_OP_DELETE,
- unless some IBUF_OP_INSERT or IBUF_OP_DELETE_MARK have
- been previously buffered for the page. Because there
- are buffered operations for the page, the insert
- buffer B-tree page latches held by mtr will guarantee
- that no changes for the user page will be merged
- before mtr_commit(&mtr). We must not mtr_commit(&mtr)
- until after the IBUF_OP_DELETE has been buffered. */
-
-fail_exit:
- if (mode == BTR_INSERT_TREE) {
- mysql_mutex_unlock(&ibuf_mutex);
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
- }
-
- err = DB_STRONG_FAIL;
- goto func_exit;
- }
-
- /* After this point, the page could still be loaded to the
- buffer pool, but we do not have to care about it, since we are
- holding a latch on the insert buffer leaf page that contains
- buffered changes for (space, page_no). If the page enters the
- buffer pool, buf_page_t::read_complete() for (space, page_no) will
- have to acquire a latch on the same insert buffer leaf page,
- which it cannot do until we have buffered the IBUF_OP_DELETE
- and done mtr_commit(&mtr) to release the latch. */
-
- ibuf_mtr_start(&bitmap_mtr);
-
- bitmap_page = ibuf_bitmap_get_map_page(page_id, zip_size, &bitmap_mtr);
-
- /* We check if the index page is suitable for buffered entries */
-
- if (!bitmap_page || buf_pool.page_hash_contains(
- page_id, buf_pool.page_hash.cell_get(page_id.fold()))) {
-commit_exit:
- ibuf_mtr_commit(&bitmap_mtr);
- goto fail_exit;
- } else if (!lock_sys.rd_lock_try()) {
- goto commit_exit;
- } else {
- hash_cell_t* cell = lock_sys.rec_hash.cell_get(page_id.fold());
- lock_sys.rec_hash.latch(cell)->acquire();
- const lock_t* lock = lock_sys_t::get_first(*cell, page_id);
- lock_sys.rec_hash.latch(cell)->release();
- lock_sys.rd_unlock();
- if (lock) {
- goto commit_exit;
- }
- }
-
- if (op == IBUF_OP_INSERT) {
- ulint bits = ibuf_bitmap_page_get_bits(
- bitmap_page->page.frame, page_id, physical_size,
- IBUF_BITMAP_FREE, &bitmap_mtr);
-
- if (buffered + entry_size + page_dir_calc_reserved_space(1)
- > ibuf_index_page_calc_free_from_bits(physical_size,
- bits)) {
- /* Release the bitmap page latch early. */
- ibuf_mtr_commit(&bitmap_mtr);
-
- /* It may not fit */
- do_merge = TRUE;
-
- ibuf_get_merge_page_nos(FALSE,
- btr_pcur_get_rec(&pcur), &mtr,
- space_ids,
- page_nos, &n_stored);
-
- goto fail_exit;
- }
- }
-
- if (!no_counter) {
- /* Patch correct counter value to the entry to
- insert. This can change the insert position, which can
- result in the need to abort in some cases. */
- ulint counter = ibuf_get_entry_counter(
- page_id.space(), page_id.page_no(),
- btr_pcur_get_rec(&pcur), &mtr,
- btr_pcur_get_btr_cur(&pcur)->low_match
- < IBUF_REC_FIELD_METADATA);
- dfield_t* field;
-
- if (counter == ULINT_UNDEFINED) {
- goto commit_exit;
- }
-
- field = dtuple_get_nth_field(
- ibuf_entry, IBUF_REC_FIELD_METADATA);
- mach_write_to_2(
- (byte*) dfield_get_data(field)
- + IBUF_REC_OFFSET_COUNTER, counter);
- }
-
- /* Set the bitmap bit denoting that the insert buffer contains
- buffered entries for this index page, if the bit is not set yet */
- index->set_modified(bitmap_mtr);
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
- bitmap_page, page_id, physical_size, true, &bitmap_mtr);
- ibuf_mtr_commit(&bitmap_mtr);
-
- cursor = btr_pcur_get_btr_cur(&pcur);
-
- if (mode == BTR_MODIFY_PREV) {
- err = btr_cur_optimistic_insert(
- BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
- cursor, &offsets, &offsets_heap,
- ibuf_entry, &ins_rec,
- &dummy_big_rec, 0, thr, &mtr);
- block = btr_cur_get_block(cursor);
- ut_ad(block->page.id().space() == IBUF_SPACE_ID);
-
- /* If this is the root page, update ibuf.empty. */
- if (block->page.id().page_no() == FSP_IBUF_TREE_ROOT_PAGE_NO) {
- const page_t* root = buf_block_get_frame(block);
-
- ut_ad(page_get_space_id(root) == IBUF_SPACE_ID);
- ut_ad(page_get_page_no(root)
- == FSP_IBUF_TREE_ROOT_PAGE_NO);
-
- ibuf.empty = page_is_empty(root);
- }
- } else {
- ut_ad(mode == BTR_INSERT_TREE);
-
- /* We acquire an sx-latch to the root page before the insert,
- because a pessimistic insert releases the tree x-latch,
- which would cause the sx-latching of the root after that to
- break the latching order. */
- if (buf_block_t* ibuf_root = ibuf_tree_root_get(&mtr)) {
- root = ibuf_root->page.frame;
- } else {
- err = DB_CORRUPTION;
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
- mysql_mutex_unlock(&ibuf_mutex);
- goto ibuf_insert_done;
- }
-
- err = btr_cur_optimistic_insert(
- BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
- cursor, &offsets, &offsets_heap,
- ibuf_entry, &ins_rec,
- &dummy_big_rec, 0, thr, &mtr);
-
- if (err == DB_FAIL) {
- err = btr_cur_pessimistic_insert(
- BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
- cursor, &offsets, &offsets_heap,
- ibuf_entry, &ins_rec,
- &dummy_big_rec, 0, thr, &mtr);
- }
-
- mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
- ibuf_size_update(root);
- mysql_mutex_unlock(&ibuf_mutex);
- ibuf.empty = page_is_empty(root);
-
- block = btr_cur_get_block(cursor);
- ut_ad(block->page.id().space() == IBUF_SPACE_ID);
- }
-
-ibuf_insert_done:
- if (offsets_heap) {
- mem_heap_free(offsets_heap);
- }
-
- if (err == DB_SUCCESS && op != IBUF_OP_DELETE) {
- /* Update the page max trx id field */
- page_update_max_trx_id(block, NULL,
- thr_get_trx(thr)->id, &mtr);
- }
-
- goto func_exit;
-}
-
-/** Buffer an operation in the change buffer, instead of applying it
-directly to the file page, if this is possible. Does not do it if the index
-is clustered or unique.
-@param[in] op operation type
-@param[in] entry index entry to insert
-@param[in,out] index index where to insert
-@param[in] page_id page id where to insert
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in,out] thr query thread
-@return true if success */
-TRANSACTIONAL_TARGET
-bool
-ibuf_insert(
- ibuf_op_t op,
- const dtuple_t* entry,
- dict_index_t* index,
- const page_id_t page_id,
- ulint zip_size,
- que_thr_t* thr)
-{
- dberr_t err;
- ulint entry_size;
- ibool no_counter;
- /* Read the settable global variable only once in
- this function, so that we will have a consistent view of it. */
- ibuf_use_t use = ibuf_use_t(innodb_change_buffering);
- DBUG_ENTER("ibuf_insert");
-
- DBUG_PRINT("ibuf", ("op: %d, space: " UINT32PF ", page_no: " UINT32PF,
- op, page_id.space(), page_id.page_no()));
-
- ut_ad(dtuple_check_typed(entry));
- ut_ad(page_id.space() != SRV_TMP_SPACE_ID);
- ut_ad(index->is_btree());
- ut_a(!dict_index_is_clust(index));
- ut_ad(!index->table->is_temporary());
-
- no_counter = use <= IBUF_USE_INSERT;
-
- switch (op) {
- case IBUF_OP_INSERT:
- switch (use) {
- case IBUF_USE_NONE:
- case IBUF_USE_DELETE:
- case IBUF_USE_DELETE_MARK:
- DBUG_RETURN(false);
- case IBUF_USE_INSERT:
- case IBUF_USE_INSERT_DELETE_MARK:
- case IBUF_USE_ALL:
- goto check_watch;
- }
- break;
- case IBUF_OP_DELETE_MARK:
- switch (use) {
- case IBUF_USE_NONE:
- case IBUF_USE_INSERT:
- DBUG_RETURN(false);
- case IBUF_USE_DELETE_MARK:
- case IBUF_USE_DELETE:
- case IBUF_USE_INSERT_DELETE_MARK:
- case IBUF_USE_ALL:
- ut_ad(!no_counter);
- goto check_watch;
- }
- break;
- case IBUF_OP_DELETE:
- switch (use) {
- case IBUF_USE_NONE:
- case IBUF_USE_INSERT:
- case IBUF_USE_INSERT_DELETE_MARK:
- DBUG_RETURN(false);
- case IBUF_USE_DELETE_MARK:
- case IBUF_USE_DELETE:
- case IBUF_USE_ALL:
- ut_ad(!no_counter);
- goto skip_watch;
- }
- break;
- case IBUF_OP_COUNT:
- break;
- }
-
- /* unknown op or use */
- ut_error;
-
-check_watch:
- /* If a thread attempts to buffer an insert on a page while a
- purge is in progress on the same page, the purge must not be
- buffered, because it could remove a record that was
- re-inserted later. For simplicity, we block the buffering of
- all operations on a page that has a purge pending.
-
- We do not check this in the IBUF_OP_DELETE case, because that
- would always trigger the buffer pool watch during purge and
- thus prevent the buffering of delete operations. We assume
- that the issuer of IBUF_OP_DELETE has called
- buf_pool_t::watch_set(). */
-
- if (buf_pool.page_hash_contains<true>(
- page_id, buf_pool.page_hash.cell_get(page_id.fold()))) {
- /* A buffer pool watch has been set or the
- page has been read into the buffer pool.
- Do not buffer the request. If a purge operation
- is being buffered, have this request executed
- directly on the page in the buffer pool after the
- buffered entries for this page have been merged. */
- DBUG_RETURN(false);
- }
-
-skip_watch:
- entry_size = rec_get_converted_size(index, entry, 0);
-
- if (entry_size
- >= page_get_free_space_of_empty(dict_table_is_comp(index->table))
- / 2) {
+ if (err != DB_SUCCESS)
+ goto func_exit;
- DBUG_RETURN(false);
- }
+ if (page_no != flst_get_last(PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST +
+ root->page.frame).page)
+ {
+ err= DB_CORRUPTION;
+ goto func_exit;
+ }
- err = ibuf_insert_low(BTR_MODIFY_PREV, op, no_counter,
- entry, entry_size,
- index, page_id, zip_size, thr);
- if (err == DB_FAIL) {
- err = ibuf_insert_low(BTR_INSERT_TREE,
- op, no_counter, entry, entry_size,
- index, page_id, zip_size, thr);
- }
+ /* Remove the page from the free list and update the ibuf size data */
+ if (buf_block_t *block=
+ buf_page_get_gen(page_id_t{0, page_no}, 0, RW_X_LATCH, nullptr, BUF_GET,
+ &mtr, &err))
+ err= flst_remove(root, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
+ block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
- ut_a(err == DB_SUCCESS || err == DB_STRONG_FAIL
- || err == DB_TOO_BIG_RECORD);
+ if (err == DB_SUCCESS)
+ buf_page_free(fil_system.sys_space, page_no, &mtr);
- DBUG_RETURN(err == DB_SUCCESS);
+ goto func_exit;
}
MY_ATTRIBUTE((nonnull, warn_unused_result))
@@ -3629,9 +316,7 @@ ibuf_insert_to_index_page_low(
return DB_SUCCESS;
/* Page reorganization or recompression should already have been
- attempted by page_cur_tuple_insert(). Besides, per
- ibuf_index_page_calc_free_zip() the page should not have been
- recompressed or reorganized. */
+ attempted by page_cur_tuple_insert(). */
ut_ad(!is_buf_block_get_page_zip(page_cur->block));
/* If the record did not fit, reorganize */
@@ -3669,19 +354,16 @@ ibuf_insert_to_index_page(
block->page.id().page_no()));
ut_ad(!dict_index_is_online_ddl(index));// this is an ibuf_dummy index
- ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
#ifdef BTR_CUR_HASH_ADAPT
- /* A change buffer merge must occur before users are granted
- any access to the page. No adaptive hash index entries may
- point to a freshly read page. */
+ /* ibuf_cleanup() must finish before the adaptive hash index
+ can be inserted into. */
ut_ad(!block->index);
- assert_block_ahi_empty(block);
#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(mtr->is_named_space(block->page.id().space()));
- if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
- != (ibool)!!page_is_comp(page))) {
+ if (UNIV_UNLIKELY(index->table->not_redundant()
+ != !!page_is_comp(page))) {
return DB_CORRUPTION;
}
@@ -3821,7 +503,6 @@ ibuf_set_del_mark(
page_cur.index = index;
ulint up_match = 0, low_match = 0;
- ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
if (!page_cur_search_with_match(entry, PAGE_CUR_LE,
@@ -3880,7 +561,6 @@ ibuf_delete(
page_cur.index = index;
ulint up_match = 0, low_match = 0;
- ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
ut_ad(!index->is_spatial());
ut_ad(!index->is_clust());
@@ -3889,7 +569,6 @@ ibuf_delete(
&up_match, &low_match, &page_cur,
nullptr)
&& low_match == dtuple_get_n_fields(entry)) {
- page_zip_des_t* page_zip= buf_block_get_page_zip(block);
page_t* page = buf_block_get_frame(block);
rec_t* rec = page_cur_get_rec(&page_cur);
@@ -3899,7 +578,6 @@ ibuf_delete(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
mem_heap_t* heap = NULL;
- ulint max_ins_size = 0;
rec_offs_init(offsets_);
@@ -3930,12 +608,8 @@ ibuf_delete(
return;
}
- if (!page_zip) {
- max_ins_size
- = page_get_max_insert_size_after_reorganize(
- page, 1);
- }
#ifdef UNIV_ZIP_DEBUG
+ page_zip_des_t* page_zip= buf_block_get_page_zip(block);
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
page_cur_delete_rec(&page_cur, offsets, mtr);
@@ -3943,760 +617,416 @@ ibuf_delete(
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
- if (page_zip) {
- ibuf_update_free_bits_zip(block, mtr);
- } else {
- ibuf_update_free_bits_low(block, max_ins_size, mtr);
- }
-
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
}
}
-/*********************************************************************//**
-Restores insert buffer tree cursor position
-@return whether the position was restored */
-static MY_ATTRIBUTE((nonnull))
-bool
-ibuf_restore_pos(
-/*=============*/
- const page_id_t page_id,/*!< in: page identifier */
- const dtuple_t* search_tuple,
- /*!< in: search tuple for entries of page_no */
- btr_latch_mode mode, /*!< in: BTR_MODIFY_LEAF or BTR_PURGE_TREE */
- btr_pcur_t* pcur, /*!< in/out: persistent cursor whose
- position is to be restored */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+/** Reset the bits in the bitmap page for the given page number.
+@param bitmap change buffer bitmap page
+@param offset page number
+@param mtr mini-transaction */
+static void ibuf_reset(buf_block_t &bitmap, uint32_t offset, mtr_t *mtr)
{
- if (UNIV_LIKELY(pcur->restore_position(mode, mtr) ==
- btr_pcur_t::SAME_ALL)) {
- return true;
- }
-
- if (fil_space_t* s = fil_space_t::get(page_id.space())) {
- ib::error() << "ibuf cursor restoration fails!"
- " ibuf record inserted to page "
- << page_id
- << " in file " << s->chain.start->name;
- s->release();
-
- ib::error() << BUG_REPORT_MSG;
-
- rec_print_old(stderr, btr_pcur_get_rec(pcur));
- rec_print_old(stderr, pcur->old_rec);
- dtuple_print(stderr, search_tuple);
- }
-
- ibuf_btr_pcur_commit_specify_mtr(pcur, mtr);
- return false;
+ offset&= uint32_t(bitmap.physical_size() - 1);
+ byte *map_byte= &bitmap.page.frame[PAGE_DATA + offset / 2];
+ /* We must reset IBUF_BITMAP_BUFFERED, but at the same time we will also
+ reset IBUF_BITMAP_FREE (and IBUF_BITMAP_IBUF, which should be clear). */
+ byte b= byte(*map_byte & ((offset & 1) ? byte{0xf} : byte{0xf0}));
+ mtr->write<1,mtr_t::MAYBE_NOP>(bitmap, map_byte, b);
}
-/**
-Delete a change buffer record.
-@param[in] page_id page identifier
-@param[in,out] pcur persistent cursor positioned on the record
-@param[in] search_tuple search key for (space,page_no)
-@param[in,out] mtr mini-transaction
-@return whether mtr was committed (due to pessimistic operation) */
-static MY_ATTRIBUTE((warn_unused_result, nonnull))
-bool ibuf_delete_rec(const page_id_t page_id, btr_pcur_t* pcur,
- const dtuple_t* search_tuple, mtr_t* mtr)
+/** Move to the next change buffer record. */
+ATTRIBUTE_COLD static dberr_t ibuf_move_to_next(btr_cur_t *cur, mtr_t *mtr)
{
- dberr_t err;
-
- ut_ad(ibuf_inside(mtr));
- ut_ad(page_rec_is_user_rec(btr_pcur_get_rec(pcur)));
- ut_ad(ibuf_rec_get_page_no(mtr, btr_pcur_get_rec(pcur))
- == page_id.page_no());
- ut_ad(ibuf_rec_get_space(mtr, btr_pcur_get_rec(pcur))
- == page_id.space());
-
- switch (btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur),
- BTR_CREATE_FLAG, mtr)) {
- case DB_FAIL:
- break;
- case DB_SUCCESS:
- if (page_is_empty(btr_pcur_get_page(pcur))) {
- /* If a B-tree page is empty, it must be the root page
- and the whole B-tree must be empty. InnoDB does not
- allow empty B-tree pages other than the root. */
- ut_d(const page_t* root = btr_pcur_get_page(pcur));
-
- ut_ad(page_get_space_id(root) == IBUF_SPACE_ID);
- ut_ad(page_get_page_no(root)
- == FSP_IBUF_TREE_ROOT_PAGE_NO);
-
- /* ibuf.empty is protected by the root page latch.
- Before the deletion, it had to be FALSE. */
- ut_ad(!ibuf.empty);
- ibuf.empty = true;
- }
- /* fall through */
- default:
- return(FALSE);
- }
-
- /* We have to resort to a pessimistic delete from ibuf.
- Delete-mark the record so that it will not be applied again,
- in case the server crashes before the pessimistic delete is
- made persistent. */
- btr_rec_set_deleted<true>(btr_pcur_get_block(pcur),
- btr_pcur_get_rec(pcur), mtr);
-
- btr_pcur_store_position(pcur, mtr);
- ibuf_btr_pcur_commit_specify_mtr(pcur, mtr);
-
- ibuf_mtr_start(mtr);
- mysql_mutex_lock(&ibuf_mutex);
- mtr_x_lock_index(ibuf.index, mtr);
-
- if (!ibuf_restore_pos(page_id, search_tuple,
- BTR_PURGE_TREE_ALREADY_LATCHED, pcur, mtr)) {
- mysql_mutex_unlock(&ibuf_mutex);
- goto func_exit;
- }
-
- if (buf_block_t* ibuf_root = ibuf_tree_root_get(mtr)) {
- btr_cur_pessimistic_delete(&err, TRUE,
- btr_pcur_get_btr_cur(pcur),
- BTR_CREATE_FLAG, false, mtr);
- ut_a(err == DB_SUCCESS);
-
- ibuf_size_update(ibuf_root->page.frame);
- ibuf.empty = page_is_empty(ibuf_root->page.frame);
- }
-
- mysql_mutex_unlock(&ibuf_mutex);
- ibuf_btr_pcur_commit_specify_mtr(pcur, mtr);
-
-func_exit:
- ut_ad(mtr->has_committed());
- btr_pcur_close(pcur);
-
- return(TRUE);
-}
+ if (!page_cur_move_to_next(&cur->page_cur))
+ return DB_CORRUPTION;
+ if (!page_cur_is_after_last(&cur->page_cur))
+ return DB_SUCCESS;
-/** Check whether buffered changes exist for a page.
-@param[in] id page identifier
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@return whether buffered changes exist */
-bool ibuf_page_exists(const page_id_t id, ulint zip_size)
-{
- ut_ad(!fsp_is_system_temporary(id.space()));
+ /* The following is adapted from btr_pcur_move_to_next_page(),
+ but we will not release any latches. */
- const ulint physical_size = zip_size ? zip_size : srv_page_size;
+ const buf_block_t &block= *cur->page_cur.block;
+ const uint32_t next_page_no= btr_page_get_next(block.page.frame);
+ switch (next_page_no) {
+ case 0:
+ case 1:
+ return DB_CORRUPTION;
+ case FIL_NULL:
+ return DB_SUCCESS;
+ }
- if (ibuf_fixed_addr_page(id, physical_size)
- || fsp_descr_page(id, physical_size)) {
- return false;
- }
+ if (UNIV_UNLIKELY(next_page_no == block.page.id().page_no()))
+ return DB_CORRUPTION;
- mtr_t mtr;
- bool bitmap_bits = false;
+ dberr_t err;
+ buf_block_t *next=
+ btr_block_get(*cur->index(), next_page_no, BTR_MODIFY_LEAF, mtr, &err);
+ if (!next)
+ return err;
- ibuf_mtr_start(&mtr);
- if (const buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
- id, zip_size, &mtr)) {
- bitmap_bits = ibuf_bitmap_page_get_bits(
- bitmap_page->page.frame, id, zip_size,
- IBUF_BITMAP_BUFFERED, &mtr) != 0;
- }
- ibuf_mtr_commit(&mtr);
- return bitmap_bits;
-}
+ if (UNIV_UNLIKELY(memcmp_aligned<4>(next->page.frame + FIL_PAGE_PREV,
+ block.page.frame + FIL_PAGE_OFFSET, 4)))
+ return DB_CORRUPTION;
-/** Reset the bits in the bitmap page for the given block and page id.
-@param b X-latched secondary index page (nullptr to discard changes)
-@param page_id page identifier
-@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param mtr mini-transaction */
-static void ibuf_reset_bitmap(buf_block_t *b, page_id_t page_id,
- ulint zip_size, mtr_t *mtr)
-{
- buf_block_t *bitmap= ibuf_bitmap_get_map_page(page_id, zip_size, mtr);
- if (!bitmap)
- return;
-
- const ulint physical_size = zip_size ? zip_size : srv_page_size;
- /* FIXME: update the bitmap byte only once! */
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(bitmap, page_id,
- physical_size, false, mtr);
-
- if (b)
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>(bitmap, page_id, physical_size,
- ibuf_index_page_calc_free(b),
- mtr);
+ page_cur_set_before_first(next, &cur->page_cur);
+ return page_cur_move_to_next(&cur->page_cur) ? DB_SUCCESS : DB_CORRUPTION;
}
-/** When an index page is read from a disk to the buffer pool, this function
-applies any buffered operations to the page and deletes the entries from the
-insert buffer. If the page is not read, but created in the buffer pool, this
-function deletes its buffered entries from the insert buffer; there can
-exist entries for such a page if the page belonged to an index which
-subsequently was dropped.
-@param block X-latched page to try to apply changes to, or NULL to discard
-@param page_id page identifier
-@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@return error code */
-dberr_t ibuf_merge_or_delete_for_page(buf_block_t *block,
- const page_id_t page_id,
- ulint zip_size)
+/** Apply changes to a block. */
+ATTRIBUTE_COLD
+static dberr_t ibuf_merge(fil_space_t *space, btr_cur_t *cur, mtr_t *mtr)
{
- if (trx_sys_hdr_page(page_id)) {
- return DB_SUCCESS;
- }
-
- ut_ad(!block || page_id == block->page.id());
- ut_ad(!block || block->page.frame);
- ut_ad(!block || !block->page.is_ibuf_exist());
- ut_ad(!block || !block->page.is_reinit());
- ut_ad(!trx_sys_hdr_page(page_id));
- ut_ad(page_id < page_id_t(SRV_SPACE_ID_UPPER_BOUND, 0));
-
- const ulint physical_size = zip_size ? zip_size : srv_page_size;
-
- if (ibuf_fixed_addr_page(page_id, physical_size)
- || fsp_descr_page(page_id, physical_size)) {
- return DB_SUCCESS;
- }
-
- btr_pcur_t pcur;
-#ifdef UNIV_IBUF_DEBUG
- ulint volume = 0;
-#endif /* UNIV_IBUF_DEBUG */
- dberr_t err = DB_SUCCESS;
- mtr_t mtr;
-
- fil_space_t* space = fil_space_t::get(page_id.space());
-
- if (UNIV_UNLIKELY(!space)) {
- block = nullptr;
- } else {
- ulint bitmap_bits = 0;
+ if (btr_cur_get_rec(cur)[4])
+ return DB_CORRUPTION;
- ibuf_mtr_start(&mtr);
+ const uint32_t space_id= mach_read_from_4(btr_cur_get_rec(cur));
+ const uint32_t page_no= mach_read_from_4(btr_cur_get_rec(cur) + 5);
- buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
- page_id, zip_size, &mtr);
+ buf_block_t *block= space && page_no < space->size
+ ? buf_page_get_gen(page_id_t{space_id, page_no}, space->zip_size(),
+ RW_X_LATCH, nullptr, BUF_GET_POSSIBLY_FREED, mtr)
+ : nullptr;
- if (bitmap_page
- && fil_page_get_type(bitmap_page->page.frame)
- != FIL_PAGE_TYPE_ALLOCATED) {
- bitmap_bits = ibuf_bitmap_page_get_bits(
- bitmap_page->page.frame, page_id, zip_size,
- IBUF_BITMAP_BUFFERED, &mtr);
- }
+ buf_block_t *bitmap= block
+ ? buf_page_get_gen(page_id_t(space_id,
+ uint32_t(page_no &
+ ~(block->physical_size() - 1)) + 1),
+ block->zip_size(), RW_X_LATCH, nullptr,
+ BUF_GET_POSSIBLY_FREED, mtr)
+ : nullptr;
- ibuf_mtr_commit(&mtr);
-
- if (bitmap_bits
- && DB_SUCCESS
- == fseg_page_is_allocated(space, page_id.page_no())) {
- ibuf_mtr_start(&mtr);
- mtr.set_named_space(space);
- ibuf_reset_bitmap(block, page_id, zip_size, &mtr);
- ibuf_mtr_commit(&mtr);
- bitmap_bits = 0;
- if (!block
- || btr_page_get_index_id(block->page.frame)
- != DICT_IBUF_ID_MIN + IBUF_SPACE_ID) {
- ibuf_delete_recs(page_id);
- }
- }
+ if (!block);
+ else if (fil_page_get_type(block->page.frame) != FIL_PAGE_INDEX ||
+ !page_is_leaf(block->page.frame) ||
+ DB_SUCCESS == fseg_page_is_allocated(space, page_no))
+ block= nullptr;
- if (!bitmap_bits) {
- /* No changes are buffered for this page. */
- space->release();
- return DB_SUCCESS;
- }
- }
-
- if (!block) {
- } else if (!fil_page_index_page_check(block->page.frame)
- || !page_is_leaf(block->page.frame)) {
- space->set_corrupted();
- err = DB_CORRUPTION;
- block = nullptr;
- } else {
- /* Move the ownership of the x-latch on the page to this OS
- thread, so that we can acquire a second x-latch on it. This
- is needed for the insert operations to the index page to pass
- the debug checks. */
-
- block->page.lock.claim_ownership();
- }
-
- mem_heap_t* heap = mem_heap_create(512);
-
- const dtuple_t* search_tuple = ibuf_search_tuple_build(
- page_id.space(), page_id.page_no(), heap);
-
- /* Counts for merged & discarded operations. */
- ulint mops[IBUF_OP_COUNT];
- ulint dops[IBUF_OP_COUNT];
-
- memset(mops, 0, sizeof(mops));
- memset(dops, 0, sizeof(dops));
- pcur.btr_cur.page_cur.index = ibuf.index;
-
-loop:
- ibuf_mtr_start(&mtr);
-
- /* Position pcur in the insert buffer at the first entry for this
- index page */
- if (btr_pcur_open_on_user_rec(search_tuple,
- BTR_MODIFY_LEAF, &pcur, &mtr)
- != DB_SUCCESS) {
- err = DB_CORRUPTION;
- goto reset_bit;
- }
-
- if (block) {
- block->page.fix();
- block->page.lock.x_lock_recursive();
- mtr.memo_push(block, MTR_MEMO_PAGE_X_FIX);
- }
-
- if (space) {
- mtr.set_named_space(space);
- }
-
- if (!btr_pcur_is_on_user_rec(&pcur)) {
- ut_ad(btr_pcur_is_after_last_on_page(&pcur));
- goto reset_bit;
- }
-
- for (;;) {
- rec_t* rec;
-
- ut_ad(btr_pcur_is_on_user_rec(&pcur));
+ do
+ {
+ rec_t *rec= cur->page_cur.rec;
+ ulint n_fields= rec_get_n_fields_old(rec);
- rec = btr_pcur_get_rec(&pcur);
+ if (n_fields <= IBUF_REC_FIELD_USER + 1 || rec[4])
+ return DB_CORRUPTION;
- /* Check if the entry is for this index page */
- if (ibuf_rec_get_page_no(&mtr, rec) != page_id.page_no()
- || ibuf_rec_get_space(&mtr, rec) != page_id.space()) {
+ n_fields-= IBUF_REC_FIELD_USER;
- if (block != NULL) {
- page_header_reset_last_insert(block, &mtr);
- }
+ ulint types_len, not_redundant;
- goto reset_bit;
- }
+ if (rec_get_1byte_offs_flag(rec))
+ {
+ if (rec_1_get_field_end_info(rec, 0) != 4 ||
+ rec_1_get_field_end_info(rec, 1) != 5 ||
+ rec_1_get_field_end_info(rec, 2) != 9)
+ return DB_CORRUPTION;
+ types_len= rec_1_get_field_end_info(rec, 3);
+ }
+ else
+ {
+ if (rec_2_get_field_end_info(rec, 0) != 4 ||
+ rec_2_get_field_end_info(rec, 1) != 5 ||
+ rec_2_get_field_end_info(rec, 2) != 9)
+ return DB_CORRUPTION;
+ types_len= rec_2_get_field_end_info(rec, 3);
+ }
- if (err) {
- fputs("InnoDB: Discarding record\n ", stderr);
- rec_print_old(stderr, rec);
- fputs("\nInnoDB: from the insert buffer!\n\n", stderr);
- } else if (block != NULL && !rec_get_deleted_flag(rec, 0)) {
- /* Now we have at pcur a record which should be
- applied on the index page; NOTE that the call below
- copies pointers to fields in rec, and we must
- keep the latch to the rec page until the
- insertion is finished! */
- dtuple_t* entry;
- trx_id_t max_trx_id;
- dict_index_t* dummy_index;
- ibuf_op_t op = ibuf_rec_get_op_type(&mtr, rec);
-
- max_trx_id = page_get_max_trx_id(page_align(rec));
- page_update_max_trx_id(block,
- buf_block_get_page_zip(block),
- max_trx_id, &mtr);
-
- ut_ad(page_validate(page_align(rec), ibuf.index));
-
- entry = ibuf_build_entry_from_ibuf_rec(
- &mtr, rec, heap, &dummy_index);
- ut_ad(!dummy_index->table->space);
- dummy_index->table->space = space;
- dummy_index->table->space_id = space->id;
-
- ut_ad(page_validate(block->page.frame, dummy_index));
-
- switch (op) {
- case IBUF_OP_INSERT:
-#ifdef UNIV_IBUF_DEBUG
- volume += rec_get_converted_size(
- dummy_index, entry, 0);
-
- volume += page_dir_calc_reserved_space(1);
-
- ut_a(volume <= (4U << srv_page_size_shift)
- / IBUF_PAGE_SIZE_PER_FREE_SPACE);
-#endif
- ibuf_insert_to_index_page(
- entry, block, dummy_index, &mtr);
- break;
-
- case IBUF_OP_DELETE_MARK:
- ibuf_set_del_mark(
- entry, block, dummy_index, &mtr);
- break;
-
- case IBUF_OP_DELETE:
- ibuf_delete(entry, block, dummy_index, &mtr);
- /* Because ibuf_delete() will latch an
- insert buffer bitmap page, commit mtr
- before latching any further pages.
- Store and restore the cursor position. */
- ut_ad(rec == btr_pcur_get_rec(&pcur));
- ut_ad(page_rec_is_user_rec(rec));
- ut_ad(ibuf_rec_get_page_no(&mtr, rec)
- == page_id.page_no());
- ut_ad(ibuf_rec_get_space(&mtr, rec)
- == page_id.space());
-
- /* Mark the change buffer record processed,
- so that it will not be merged again in case
- the server crashes between the following
- mtr_commit() and the subsequent mtr_commit()
- of deleting the change buffer record. */
- btr_rec_set_deleted<true>(
- btr_pcur_get_block(&pcur),
- btr_pcur_get_rec(&pcur), &mtr);
-
- btr_pcur_store_position(&pcur, &mtr);
- ibuf_btr_pcur_commit_specify_mtr(&pcur, &mtr);
-
- ibuf_mtr_start(&mtr);
- mtr.set_named_space(space);
-
- block->page.lock.x_lock_recursive();
- block->fix();
- mtr.memo_push(block, MTR_MEMO_PAGE_X_FIX);
-
- if (!ibuf_restore_pos(page_id, search_tuple,
- BTR_MODIFY_LEAF,
- &pcur, &mtr)) {
-
- ut_ad(mtr.has_committed());
- mops[op]++;
- ibuf_dummy_index_free(dummy_index);
- goto loop;
- }
-
- break;
- default:
- ut_error;
- }
-
- mops[op]++;
-
- ibuf_dummy_index_free(dummy_index);
- } else {
- dops[ibuf_rec_get_op_type(&mtr, rec)]++;
- }
+ if (types_len < 9 || (types_len - 9) / 6 != n_fields)
+ return DB_CORRUPTION;
- /* Delete the record from ibuf */
- if (ibuf_delete_rec(page_id, &pcur, search_tuple, &mtr)) {
- /* Deletion was pessimistic and mtr was committed:
- we start from the beginning again */
+ ibuf_op op= IBUF_OP_INSERT;
+ const ulint info_len= (types_len - 9) % 6;
- ut_ad(mtr.has_committed());
- goto loop;
- } else if (btr_pcur_is_after_last_on_page(&pcur)) {
- ibuf_mtr_commit(&mtr);
- goto loop;
- }
- }
+ switch (info_len) {
+ default:
+ return DB_CORRUPTION;
+ case 0: case 1:
+ not_redundant= info_len;
+ break;
+ case 4:
+ not_redundant= rec[9 + 3];
+ if (rec[9 + 2] > IBUF_OP_DELETE || not_redundant > 1)
+ return DB_CORRUPTION;
+ op= static_cast<ibuf_op>(rec[9 + 2]);
+ }
-reset_bit:
- if (space) {
- ibuf_reset_bitmap(block, page_id, zip_size, &mtr);
- }
+ const byte *const types= rec + 9 + info_len;
- ibuf_mtr_commit(&mtr);
- ut_free(pcur.old_rec_buf);
+ if (ibuf_rec_get_space(rec) != space_id ||
+ ibuf_rec_get_page_no(rec) != page_no)
+ break;
- if (space) {
- space->release();
- }
+ if (!rec_get_deleted_flag(rec, 0))
+ {
+ /* Delete-mark the record so that it will not be applied again if
+ the server is killed before the completion of ibuf_upgrade(). */
+ btr_rec_set_deleted<true>(cur->page_cur.block, rec, mtr);
+
+ if (block)
+ {
+ page_header_reset_last_insert(block, mtr);
+ page_update_max_trx_id(block, buf_block_get_page_zip(block),
+ page_get_max_trx_id(page_align(rec)), mtr);
+ dict_index_t *index;
+ mem_heap_t *heap = mem_heap_create(512);
+ dtuple_t *entry= ibuf_entry_build(rec, not_redundant, n_fields,
+ types, heap, index);
+ dict_table_t *table= index->table;
+ ut_ad(!table->space);
+ table->space= space;
+ table->space_id= space_id;
+
+ switch (op) {
+ case IBUF_OP_INSERT:
+ ibuf_insert_to_index_page(entry, block, index, mtr);
+ break;
+ case IBUF_OP_DELETE_MARK:
+ ibuf_set_del_mark(entry, block, index, mtr);
+ break;
+ case IBUF_OP_DELETE:
+ ibuf_delete(entry, block, index, mtr);
+ break;
+ }
+
+ mem_heap_free(heap);
+ dict_mem_index_free(index);
+ dict_mem_table_free(table);
+ }
+ }
- mem_heap_free(heap);
+ if (dberr_t err= ibuf_move_to_next(cur, mtr))
+ return err;
+ }
+ while (!page_cur_is_after_last(&cur->page_cur));
- ibuf.n_merges++;
- ibuf_add_ops(ibuf.n_merged_ops, mops);
- ibuf_add_ops(ibuf.n_discarded_ops, dops);
+ if (bitmap)
+ ibuf_reset(*bitmap, page_no, mtr);
- return err;
+ return DB_SUCCESS;
}
-/** Delete all change buffer entries for a tablespace,
-in DISCARD TABLESPACE, IMPORT TABLESPACE, or read-ahead.
-@param[in] space missing or to-be-discarded tablespace */
-void ibuf_delete_for_discarded_space(uint32_t space)
+static dberr_t ibuf_open(btr_cur_t *cur, mtr_t *mtr)
{
- if (UNIV_UNLIKELY(!ibuf.index)) return;
-
- btr_pcur_t pcur;
- const rec_t* ibuf_rec;
- mtr_t mtr;
-
- /* Counts for discarded operations. */
- ulint dops[IBUF_OP_COUNT];
-
- dfield_t dfield[IBUF_REC_FIELD_METADATA];
- dtuple_t search_tuple {0,IBUF_REC_FIELD_METADATA,
- IBUF_REC_FIELD_METADATA,dfield,0
- ,nullptr
-#ifdef UNIV_DEBUG
- ,DATA_TUPLE_MAGIC_N
-#endif /* UNIV_DEBUG */
- };
- byte space_id[4];
- mach_write_to_4(space_id, space);
-
- dfield_set_data(&dfield[0], space_id, 4);
- dfield_set_data(&dfield[1], field_ref_zero, 1);
- dfield_set_data(&dfield[2], field_ref_zero, 4);
- dtuple_set_types_binary(&search_tuple, IBUF_REC_FIELD_METADATA);
- /* Use page number 0 to build the search tuple so that we get the
- cursor positioned at the first entry for this space id */
-
- memset(dops, 0, sizeof(dops));
- pcur.btr_cur.page_cur.index = ibuf.index;
-
-loop:
- log_free_check();
- ibuf_mtr_start(&mtr);
-
- /* Position pcur in the insert buffer at the first entry for the
- space */
- if (btr_pcur_open_on_user_rec(&search_tuple,
- BTR_MODIFY_LEAF, &pcur, &mtr)
- != DB_SUCCESS) {
- goto leave_loop;
- }
-
- if (!btr_pcur_is_on_user_rec(&pcur)) {
- ut_ad(btr_pcur_is_after_last_on_page(&pcur));
- goto leave_loop;
- }
-
- for (;;) {
- ut_ad(btr_pcur_is_on_user_rec(&pcur));
-
- ibuf_rec = btr_pcur_get_rec(&pcur);
-
- /* Check if the entry is for this space */
- if (ibuf_rec_get_space(&mtr, ibuf_rec) != space) {
-
- goto leave_loop;
- }
-
- uint32_t page_no = ibuf_rec_get_page_no(&mtr, ibuf_rec);
+ ut_ad(mtr->get_savepoint() == 1);
- dops[ibuf_rec_get_op_type(&mtr, ibuf_rec)]++;
+ uint32_t page= FSP_IBUF_TREE_ROOT_PAGE_NO;
- /* Delete the record from ibuf */
- if (ibuf_delete_rec(page_id_t(space, page_no),
- &pcur, &search_tuple, &mtr)) {
- /* Deletion was pessimistic and mtr was committed:
- we start from the beginning again */
+ for (ulint height= ULINT_UNDEFINED;;)
+ {
+ dberr_t err;
+ buf_block_t* block= btr_block_get(*cur->index(), page, RW_X_LATCH, mtr,
+ &err);
+ ut_ad(!block == (err != DB_SUCCESS));
- ut_ad(mtr.has_committed());
-clear:
- ut_free(pcur.old_rec_buf);
- goto loop;
- }
+ if (!block)
+ return err;
- if (btr_pcur_is_after_last_on_page(&pcur)) {
- ibuf_mtr_commit(&mtr);
- goto clear;
- }
- }
+ page_cur_set_before_first(block, &cur->page_cur);
+ const uint32_t l= btr_page_get_level(block->page.frame);
-leave_loop:
- ibuf_mtr_commit(&mtr);
- ut_free(pcur.old_rec_buf);
+ if (height == ULINT_UNDEFINED)
+ height= l;
+ else
+ {
+ /* Release the parent page latch. */
+ ut_ad(mtr->get_savepoint() == 3);
+ mtr->rollback_to_savepoint(1, 2);
- ibuf_add_ops(ibuf.n_discarded_ops, dops);
-}
+ if (UNIV_UNLIKELY(height != l))
+ return DB_CORRUPTION;
+ }
-/******************************************************************//**
-Looks if the insert buffer is empty.
-@return true if empty */
-bool
-ibuf_is_empty(void)
-/*===============*/
-{
- mtr_t mtr;
+ if (!height)
+ return ibuf_move_to_next(cur, mtr);
- ibuf_mtr_start(&mtr);
+ height--;
- ut_d(mysql_mutex_lock(&ibuf_mutex));
- const buf_block_t* root = ibuf_tree_root_get(&mtr);
- bool is_empty = root && page_is_empty(root->page.frame);
- ut_ad(!root || is_empty == ibuf.empty);
- ut_d(mysql_mutex_unlock(&ibuf_mutex));
- ibuf_mtr_commit(&mtr);
+ if (!page_cur_move_to_next(&cur->page_cur))
+ return DB_CORRUPTION;
- return(is_empty);
+ const rec_t *ptr= cur->page_cur.rec;
+ const ulint n_fields= rec_get_n_fields_old(ptr);
+ if (n_fields <= IBUF_REC_FIELD_USER)
+ return DB_CORRUPTION;
+ ulint len;
+ ptr+= rec_get_nth_field_offs_old(ptr, n_fields - 1, &len);
+ if (len != 4)
+ return DB_CORRUPTION;
+ page= mach_read_from_4(ptr);
+ }
}
-/******************************************************************//**
-Prints info of ibuf. */
-void
-ibuf_print(
-/*=======*/
- FILE* file) /*!< in: file where to print */
+ATTRIBUTE_COLD dberr_t ibuf_upgrade()
{
- if (UNIV_UNLIKELY(!ibuf.index)) return;
-
- mysql_mutex_lock(&ibuf_mutex);
- if (ibuf.empty)
+ if (srv_read_only_mode)
{
- mysql_mutex_unlock(&ibuf_mutex);
- return;
+ sql_print_error("InnoDB: innodb_read_only_mode prevents an upgrade");
+ return DB_READ_ONLY;
}
- const ulint size= ibuf.size;
- const ulint free_list_len= ibuf.free_list_len;
- const ulint seg_size= ibuf.seg_size;
- mysql_mutex_unlock(&ibuf_mutex);
-
- fprintf(file,
- "-------------\n"
- "INSERT BUFFER\n"
- "-------------\n"
- "size " ULINTPF ", free list len " ULINTPF ","
- " seg size " ULINTPF ", " ULINTPF " merges\n",
- size, free_list_len, seg_size, ulint{ibuf.n_merges});
- ibuf_print_ops("merged operations:\n", ibuf.n_merged_ops, file);
- ibuf_print_ops("discarded operations:\n", ibuf.n_discarded_ops, file);
-}
-
-/** Check the insert buffer bitmaps on IMPORT TABLESPACE.
-@param[in] trx transaction
-@param[in,out] space tablespace being imported
-@return DB_SUCCESS or error code */
-dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
-{
- ut_ad(trx->mysql_thd);
- ut_ad(space->purpose == FIL_TYPE_IMPORT);
+ sql_print_information("InnoDB: Upgrading the change buffer");
- const unsigned zip_size = space->zip_size();
- const unsigned physical_size = space->physical_size();
-
- uint32_t size= std::min(space->free_limit, space->size);
-
- if (size == 0) {
- return(DB_TABLE_NOT_FOUND);
- }
+#ifdef BTR_CUR_HASH_ADAPT
+ const bool ahi= btr_search_enabled;
+ if (ahi)
+ btr_search_disable();
+#endif
- mtr_t mtr;
+ dict_table_t *ibuf_table= dict_table_t::create({C_STRING_WITH_LEN("ibuf")},
+ fil_system.sys_space,
+ 1, 0, 0, 0);
+ dict_index_t *ibuf_index=
+ dict_mem_index_create(ibuf_table, "CLUST_IND", DICT_CLUSTERED, 1);
+ ibuf_index->id= ibuf_index_id;
+ ibuf_index->n_uniq= REC_MAX_N_FIELDS;
+ ibuf_index->lock.SRW_LOCK_INIT(index_tree_rw_lock_key);
+ ibuf_index->page= FSP_IBUF_TREE_ROOT_PAGE_NO;
+ ut_d(ibuf_index->is_dummy= true);
+ ut_d(ibuf_index->cached= true);
+
+ size_t spaces=0, pages= 0;
+ dberr_t err;
+ mtr_t mtr;
+ mtr.start();
+ mtr_x_lock_index(ibuf_index, &mtr);
- /* The two bitmap pages (allocation bitmap and ibuf bitmap) repeat
- every page_size pages. For example if page_size is 16 KiB, then the
- two bitmap pages repeat every 16 KiB * 16384 = 256 MiB. In the loop
- below page_no is measured in number of pages since the beginning of
- the space, as usual. */
+ {
+ btr_cur_t cur;
+ uint32_t prev_space_id= ~0U;
+ fil_space_t *space= nullptr;
+ cur.page_cur.index= ibuf_index;
+ log_free_check();
+ err= ibuf_open(&cur, &mtr);
+
+ while (err == DB_SUCCESS && !page_cur_is_after_last(&cur.page_cur))
+ {
+ const uint32_t space_id= ibuf_rec_get_space(cur.page_cur.rec);
+ if (space_id != prev_space_id)
+ {
+ if (space)
+ space->release();
+ prev_space_id= space_id;
+ space= fil_space_t::get(space_id);
+ if (space)
+ mtr.set_named_space(space);
+ spaces++;
+ }
+ pages++;
+ err= ibuf_merge(space, &cur, &mtr);
+ if (err == DB_SUCCESS)
+ {
+ /* Move to the next user index page. We buffer-fix the current
+ change buffer leaf page to prevent it from being evicted
+ before we have started a new mini-transaction. */
+ cur.page_cur.block->fix();
+ mtr.commit();
+
+ if (recv_sys.report(time(nullptr)))
+ {
+ sql_print_information("InnoDB: merged changes to"
+ " %zu tablespaces, %zu pages", spaces, pages);
+ service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
+ "merged changes to"
+ " %zu tablespaces, %zu pages",
+ spaces, pages);
+ }
+
+ log_free_check();
+ mtr.start();
+ mtr.page_lock(cur.page_cur.block, RW_X_LATCH);
+ if (space)
+ mtr.set_named_space(space);
+ }
+ }
+ mtr.commit();
+ if (space)
+ space->release();
+ }
- for (uint32_t page_no = 0; page_no < size; page_no += physical_size) {
- if (trx_is_interrupted(trx)) {
- return(DB_INTERRUPTED);
- }
+ if (err == DB_SUCCESS)
+ {
+ mtr.start();
+ if (buf_block_t *root= buf_page_get_gen(ibuf_root, 0, RW_X_LATCH,
+ nullptr, BUF_GET, &mtr, &err))
+ {
+ page_create(root, &mtr, false);
+ mtr.write<2,mtr_t::MAYBE_NOP>(*root, PAGE_HEADER + PAGE_LEVEL +
+ root->page.frame, 0U);
+ }
+ mtr.commit();
- mtr_start(&mtr);
+ while (err == DB_SUCCESS)
+ err= ibuf_remove_free_page(mtr);
- buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
- page_id_t(space->id, page_no), zip_size, &mtr);
- if (!bitmap_page) {
- mtr.commit();
- return DB_CORRUPTION;
- }
+ if (err == DB_SUCCESS_LOCKED_REC)
+ err= DB_SUCCESS;
+ }
- if (buf_is_zeroes(span<const byte>(bitmap_page->page.frame,
- physical_size))) {
- /* This means we got all-zero page instead of
- ibuf bitmap page. The subsequent page should be
- all-zero pages. */
-#ifdef UNIV_DEBUG
- for (uint32_t curr_page = page_no + 1;
- curr_page < physical_size; curr_page++) {
-
- buf_block_t* block = buf_page_get(
- page_id_t(space->id, curr_page),
- zip_size, RW_S_LATCH, &mtr);
- page_t* page = buf_block_get_frame(block);
- ut_ad(buf_is_zeroes(span<const byte>(
- page,
- physical_size)));
- }
-#endif /* UNIV_DEBUG */
- mtr_commit(&mtr);
- continue;
- }
+#ifdef BTR_CUR_HASH_ADAPT
+ if (ahi)
+ btr_search_enable();
+#endif
- for (uint32_t i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size;
- i++) {
- const uint32_t offset = page_no + i;
- const page_id_t cur_page_id(space->id, offset);
-
- if (ibuf_bitmap_page_get_bits(
- bitmap_page->page.frame,
- cur_page_id, zip_size,
- IBUF_BITMAP_IBUF, &mtr)) {
-
- mtr_commit(&mtr);
-
- ib_errf(trx->mysql_thd,
- IB_LOG_LEVEL_ERROR,
- ER_INNODB_INDEX_CORRUPT,
- "File %s page %u"
- " is wrongly flagged to belong to the"
- " insert buffer",
- space->chain.start->name, offset);
- return(DB_CORRUPTION);
- }
-
- if (ibuf_bitmap_page_get_bits(
- bitmap_page->page.frame,
- cur_page_id, zip_size,
- IBUF_BITMAP_BUFFERED, &mtr)) {
-
- ib_errf(trx->mysql_thd,
- IB_LOG_LEVEL_WARN,
- ER_INNODB_INDEX_CORRUPT,
- "Buffered changes"
- " for file %s page %u are lost",
- space->chain.start->name, offset);
-
- /* Tolerate this error, so that
- slightly corrupted tables can be
- imported and dumped. Clear the bit. */
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>(
- bitmap_page, cur_page_id,
- physical_size, false, &mtr);
- }
- }
+ ibuf_index->lock.free();
+ dict_mem_index_free(ibuf_index);
+ dict_mem_table_free(ibuf_table);
- mtr_commit(&mtr);
- }
+ if (err)
+ sql_print_error("InnoDB: Unable to upgrade the change buffer");
+ else
+ sql_print_information("InnoDB: Upgraded the change buffer: "
+ "%zu tablespaces, %zu pages", spaces, pages);
- return(DB_SUCCESS);
+ return err;
}
-void ibuf_set_bitmap_for_bulk_load(buf_block_t *block, mtr_t *mtr, bool reset)
+dberr_t ibuf_upgrade_needed()
{
- ut_a(page_is_leaf(block->page.frame));
- const page_id_t id{block->page.id()};
- const auto zip_size= block->zip_size();
+ mtr_t mtr;
+ mtr.start();
+ mtr.x_lock_space(fil_system.sys_space);
+ dberr_t err;
+ const buf_block_t *header_page=
+ buf_page_get_gen(ibuf_header, 0, RW_S_LATCH, nullptr, BUF_GET, &mtr, &err);
- if (buf_block_t *bitmap_page= ibuf_bitmap_get_map_page(id, zip_size, mtr))
+ if (!header_page)
{
- if (ibuf_bitmap_page_get_bits(bitmap_page->page.frame, id, zip_size,
- IBUF_BITMAP_BUFFERED, mtr))
- ibuf_delete_recs(id);
-
- ulint free_val= reset ? 0 : ibuf_index_page_calc_free(block);
- /* FIXME: update the bitmap byte only once! */
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>
- (bitmap_page, id, block->physical_size(), free_val, mtr);
- ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>
- (bitmap_page, id, block->physical_size(), false, mtr);
+ err_exit:
+ sql_print_error("InnoDB: The change buffer is corrupted");
+ if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO)
+ err= DB_SUCCESS;
+ func_exit:
+ mtr.commit();
+ return err;
+ }
+
+ const buf_block_t *root= buf_page_get_gen(ibuf_root, 0, RW_S_LATCH, nullptr,
+ BUF_GET, &mtr, &err);
+ if (!root)
+ goto err_exit;
+
+ if (UNIV_LIKELY(!page_has_siblings(root->page.frame)) &&
+ UNIV_LIKELY(!memcmp(root->page.frame + FIL_PAGE_TYPE, field_ref_zero,
+ srv_page_size -
+ (FIL_PAGE_DATA_END + FIL_PAGE_TYPE))))
+ /* the change buffer was removed; no need to upgrade */;
+ else if (page_is_comp(root->page.frame) ||
+ btr_page_get_index_id(root->page.frame) != ibuf_index_id ||
+ fil_page_get_type(root->page.frame) != FIL_PAGE_INDEX)
+ {
+ err= DB_CORRUPTION;
+ goto err_exit;
}
+ else if (srv_read_only_mode)
+ {
+ sql_print_error("InnoDB: innodb_read_only=ON prevents an upgrade"
+ " of the change buffer");
+ err= DB_READ_ONLY;
+ }
+ else if (srv_force_recovery != SRV_FORCE_NO_LOG_REDO)
+ err= DB_FAIL;
+
+ goto func_exit;
}
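
For orientation while reading ibuf_reset() and ibuf_merge() above: the change buffer bitmap keeps a 4-bit descriptor per tracked page, packed two descriptors per byte starting at the page-data offset of the bitmap page, and the upgrade path clears the whole nibble at once. The sketch below restates that nibble arithmetic in isolation; it is not part of the patch, the helper name bitmap_clear_page is invented for this illustration, and PAGE_DATA_OFFSET and PHYSICAL_SIZE are stand-in constants.

#include <cstdint>

// Stand-in constants; the real values come from the InnoDB headers.
static constexpr uint32_t PAGE_DATA_OFFSET = 94;     // assumed start of bitmap data
static constexpr uint32_t PHYSICAL_SIZE    = 16384;  // assumed page size

// Clear the FREE, BUFFERED and IBUF bits for page `page_no` in its bitmap
// page, mirroring the nibble masking done by ibuf_reset() in the patch.
inline void bitmap_clear_page(uint8_t *bitmap_frame, uint32_t page_no)
{
  const uint32_t slot = page_no & (PHYSICAL_SIZE - 1); // slot within this bitmap page
  uint8_t *b = bitmap_frame + PAGE_DATA_OFFSET + slot / 2;
  // Even slots occupy the low nibble, odd slots the high nibble;
  // keep the other page's nibble and zero this one.
  *b = uint8_t(*b & ((slot & 1) ? 0x0f : 0xf0));
}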
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index a56598d3620..bfcc559cf5f 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -56,12 +56,8 @@ is acceptable for the program to die with a clear assert failure. */
#define BTR_MAX_LEVELS 100
#define BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode) \
- btr_latch_mode((latch_mode) & ~(BTR_INSERT \
- | BTR_DELETE_MARK \
- | BTR_RTREE_UNDO_INS \
+ btr_latch_mode((latch_mode) & ~(BTR_RTREE_UNDO_INS \
| BTR_RTREE_DELETE_MARK \
- | BTR_DELETE \
- | BTR_IGNORE_SEC_UNIQUE \
| BTR_ALREADY_S_LATCHED \
| BTR_LATCH_FOR_INSERT \
| BTR_LATCH_FOR_DELETE))
@@ -79,6 +75,14 @@ btr_root_adjust_on_import(
const dict_index_t* index) /*!< in: index tree */
MY_ATTRIBUTE((warn_unused_result));
+/** Check a file segment header within a B-tree root page.
+@param offset file segment header offset
+@param block B-tree root page
+@param space tablespace
+@return whether the segment header is valid */
+bool btr_root_fseg_validate(ulint offset, const buf_block_t &block,
+ const fil_space_t &space);
+
/** Report a decryption failure. */
ATTRIBUTE_COLD void btr_decryption_failed(const dict_index_t &index);
@@ -86,12 +90,11 @@ ATTRIBUTE_COLD void btr_decryption_failed(const dict_index_t &index);
@param[in] index index tree
@param[in] page page number
@param[in] mode latch mode
-@param[in] merge whether change buffer merge should be attempted
@param[in,out] mtr mini-transaction
@param[out] err error code
@return block */
buf_block_t *btr_block_get(const dict_index_t &index,
- uint32_t page, ulint mode, bool merge,
+ uint32_t page, ulint mode,
mtr_t *mtr, dberr_t *err= nullptr);
/**************************************************************//**
@@ -241,15 +244,7 @@ btr_root_raise_and_insert(
mtr_t* mtr, /*!< in: mtr */
dberr_t* err) /*!< out: error code */
MY_ATTRIBUTE((nonnull, warn_unused_result));
-/*************************************************************//**
-Reorganizes an index page.
-
-IMPORTANT: On success, the caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index. This has to
-be done either within the same mini-transaction, or by invoking
-ibuf_reset_free_bits() before mtr_commit(). On uncompressed pages,
-IBUF_BITMAP_FREE is unaffected by reorganization.
-
+/** Reorganize an index page.
@param cursor page cursor
@param mtr mini-transaction
@return error code
@@ -347,6 +342,7 @@ btr_check_node_ptr(
/*===============*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: index page */
+ que_thr_t* thr, /*!< in/out: query thread */
mtr_t* mtr) /*!< in: mtr */
MY_ATTRIBUTE((warn_unused_result));
#endif /* UNIV_DEBUG */
@@ -450,15 +446,8 @@ btr_root_block_get(
or RW_X_LATCH */
mtr_t* mtr, /*!< in: mtr */
dberr_t* err); /*!< out: error code */
-/*************************************************************//**
-Reorganizes an index page.
-
-IMPORTANT: On success, the caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index. This has to
-be done either within the same mini-transaction, or by invoking
-ibuf_reset_free_bits() before mtr_commit(). On uncompressed pages,
-IBUF_BITMAP_FREE is unaffected by reorganization.
+/** Reorganize an index page.
@return error code
@retval DB_FAIL if reorganizing a ROW_FORMAT=COMPRESSED page failed */
dberr_t btr_page_reorganize_block(
@@ -529,9 +518,10 @@ btr_lift_page_up(
must not be empty: use
btr_discard_only_page_on_level if the last
record from the page should be removed */
+ que_thr_t* thr, /*!< in/out: query thread for SPATIAL INDEX */
mtr_t* mtr, /*!< in/out: mini-transaction */
dberr_t* err) /*!< out: error code */
- __attribute__((nonnull));
+ __attribute__((nonnull(1,2,4,5)));
#define BTR_N_LEAF_PAGES 1
#define BTR_TOTAL_SIZE 2
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index f6abc9f5e52..dc64054eb3e 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -56,11 +56,7 @@ enum {
BTR_KEEP_POS_FLAG = 8,
/** the caller is creating the index or wants to bypass the
index->info.online creation log */
- BTR_CREATE_FLAG = 16,
- /** the caller of btr_cur_optimistic_update() or
- btr_cur_update_in_place() will take care of
- updating IBUF_BITMAP_FREE */
- BTR_KEEP_IBUF_BITMAP = 32
+ BTR_CREATE_FLAG = 16
};
#include "que0types.h"
@@ -213,14 +209,8 @@ btr_cur_pessimistic_insert(
See if there is enough place in the page modification log to log
an update-in-place.
-@retval false if out of space; IBUF_BITMAP_FREE will be reset
-outside mtr if the page was recompressed
-@retval true if enough place;
-
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE if this is
-a secondary index leaf page. This has to be done either within the
-same mini-transaction, or by invoking ibuf_reset_free_bits() before
-mtr_commit(mtr). */
+@retval false if out of space
+@retval true if enough place */
bool
btr_cur_update_alloc_zip_func(
/*==========================*/
@@ -262,7 +252,7 @@ Updates a record when the update causes no size changes in its fields.
@return locking or undo log related error code, or
@retval DB_SUCCESS on success
@retval DB_ZIP_OVERFLOW if there is not enough space left
-on the compressed page (IBUF_BITMAP_FREE was reset outside mtr) */
+on a ROW_FORMAT=COMPRESSED page */
dberr_t
btr_cur_update_in_place(
/*====================*/
@@ -669,28 +659,13 @@ enum btr_cur_method {
reference is stored in the field
hash_node, and might be necessary to
update */
- BTR_CUR_BINARY, /*!< success using the binary search */
- BTR_CUR_INSERT_TO_IBUF, /*!< performed the intended insert to
- the insert buffer */
- BTR_CUR_DEL_MARK_IBUF, /*!< performed the intended delete
- mark in the insert/delete buffer */
- BTR_CUR_DELETE_IBUF, /*!< performed the intended delete in
- the insert/delete buffer */
- BTR_CUR_DELETE_REF /*!< row_purge_poss_sec() failed */
+ BTR_CUR_BINARY /*!< success using the binary search */
};
/** The tree cursor: the definition appears here only for the compiler
to know struct size! */
struct btr_cur_t {
page_cur_t page_cur; /*!< page cursor */
- purge_node_t* purge_node; /*!< purge node, for BTR_DELETE */
- /*------------------------------*/
- que_thr_t* thr; /*!< this field is only used
- when search_leaf()
- is called for an index entry
- insertion: the calling query
- thread is passed here to be
- used in the insert buffer */
/*------------------------------*/
/** The following fields are used in
search_leaf() to pass information: */
diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h
index fc829e7857a..966247ffa00 100644
--- a/storage/innobase/include/btr0types.h
+++ b/storage/innobase/include/btr0types.h
@@ -69,7 +69,7 @@ enum btr_latch_mode {
Used in btr_pcur_move_backward_from_page(). */
BTR_SEARCH_PREV = 4 | BTR_SEARCH_LEAF,
/** Modify the previous record.
- Used in btr_pcur_move_backward_from_page() and ibuf_insert(). */
+ Used in btr_pcur_move_backward_from_page(). */
BTR_MODIFY_PREV = 4 | BTR_MODIFY_LEAF,
/** Start modifying the entire B-tree. */
BTR_MODIFY_TREE = 8 | BTR_MODIFY_LEAF,
@@ -77,24 +77,8 @@ enum btr_latch_mode {
Only used by rtr_search_to_nth_level(). */
BTR_CONT_MODIFY_TREE = 4 | BTR_MODIFY_TREE,
- /* BTR_INSERT, BTR_DELETE and BTR_DELETE_MARK are mutually
- exclusive. */
- /** The search tuple will be inserted to the secondary index
- at the searched position. When the leaf page is not in the
- buffer pool, try to use the change buffer. */
- BTR_INSERT = 64,
-
- /** Try to delete mark a secondary index leaf page record at
- the searched position using the change buffer when the page is
- not in the buffer pool. */
- BTR_DELETE_MARK = 128,
-
- /** Try to purge the record using the change buffer when the
- secondary index leaf page is not in the buffer pool. */
- BTR_DELETE = BTR_INSERT | BTR_DELETE_MARK,
-
/** The caller is already holding dict_index_t::lock S-latch. */
- BTR_ALREADY_S_LATCHED = 256,
+ BTR_ALREADY_S_LATCHED = 16,
/** Search and S-latch a leaf page, assuming that the
dict_index_t::lock S-latch is being held. */
BTR_SEARCH_LEAF_ALREADY_S_LATCHED = BTR_SEARCH_LEAF
@@ -111,28 +95,15 @@ enum btr_latch_mode {
BTR_MODIFY_ROOT_AND_LEAF_ALREADY_LATCHED = BTR_MODIFY_ROOT_AND_LEAF
| BTR_ALREADY_S_LATCHED,
- /** Attempt to delete-mark a secondary index record. */
- BTR_DELETE_MARK_LEAF = BTR_MODIFY_LEAF | BTR_DELETE_MARK,
- /** Attempt to delete-mark a secondary index record
- while holding the dict_index_t::lock S-latch. */
- BTR_DELETE_MARK_LEAF_ALREADY_S_LATCHED = BTR_DELETE_MARK_LEAF
- | BTR_ALREADY_S_LATCHED,
- /** Attempt to purge a secondary index record. */
- BTR_PURGE_LEAF = BTR_MODIFY_LEAF | BTR_DELETE,
- /** Attempt to purge a secondary index record
- while holding the dict_index_t::lock S-latch. */
- BTR_PURGE_LEAF_ALREADY_S_LATCHED = BTR_PURGE_LEAF
- | BTR_ALREADY_S_LATCHED,
-
/** In the case of BTR_MODIFY_TREE, the caller specifies
the intention to delete record only. It is used to optimize
block->lock range.*/
- BTR_LATCH_FOR_DELETE = 512,
+ BTR_LATCH_FOR_DELETE = 32,
/** In the case of BTR_MODIFY_TREE, the caller specifies
the intention to insert record only. It is used to optimize
block->lock range.*/
- BTR_LATCH_FOR_INSERT = 1024,
+ BTR_LATCH_FOR_INSERT = 64,
/** Attempt to delete a record in the tree. */
BTR_PURGE_TREE = BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE,
@@ -143,12 +114,8 @@ enum btr_latch_mode {
/** Attempt to insert a record into the tree. */
BTR_INSERT_TREE = BTR_MODIFY_TREE | BTR_LATCH_FOR_INSERT,
- /** This flag ORed to BTR_INSERT says that we can ignore possible
- UNIQUE definition on secondary indexes when we decide if we can use
- the insert buffer to speed up inserts */
- BTR_IGNORE_SEC_UNIQUE = 2048,
/** Rollback in spatial index */
- BTR_RTREE_UNDO_INS = 4096,
+ BTR_RTREE_UNDO_INS = 128,
/** Try to delete mark a spatial index record */
- BTR_RTREE_DELETE_MARK = 8192
+ BTR_RTREE_DELETE_MARK = 256
};
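
With BTR_INSERT, BTR_DELETE_MARK, BTR_DELETE and BTR_IGNORE_SEC_UNIQUE gone, the remaining btr_latch_mode flag bits are renumbered into the freed-up low positions. The following is a minimal sketch, under assumed numeric values, of how such flag bits are ORed onto a base latch mode and stripped again, in the spirit of BTR_LATCH_MODE_WITHOUT_FLAGS() in btr0btr.h above; none of these identifiers are taken from the patch.

// Hypothetical values mirroring the renumbered flags in btr0types.h.
enum latch_flag : unsigned {
  ALREADY_S_LATCHED = 16,
  LATCH_FOR_DELETE  = 32,
  LATCH_FOR_INSERT  = 64,
  RTREE_UNDO_INS    = 128,
  RTREE_DELETE_MARK = 256
};

constexpr unsigned MODIFY_TREE = 8;   // assumed base latch mode value

// Strip every flag bit, leaving only the base latch mode.
constexpr unsigned without_flags(unsigned latch_mode)
{
  return latch_mode & ~(RTREE_UNDO_INS | RTREE_DELETE_MARK |
                        ALREADY_S_LATCHED | LATCH_FOR_INSERT |
                        LATCH_FOR_DELETE);
}

// A purge-tree style mode (base | LATCH_FOR_DELETE) reduces to its base mode.
static_assert(without_flags(MODIFY_TREE | LATCH_FOR_DELETE) == MODIFY_TREE,
              "flag bits are orthogonal to the base latch mode");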
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 55c7a504a6c..420d4a388e8 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,10 +48,6 @@ Created 11/5/1995 Heikki Tuuri
#define BUF_GET_IF_IN_POOL 11 /*!< get if in pool */
#define BUF_PEEK_IF_IN_POOL 12 /*!< get if in pool, do not make
the block young in the LRU list */
-#define BUF_GET_IF_IN_POOL_OR_WATCH 15
- /*!< Get the page only if it's in the
- buffer pool, if not then set a watch
- on the page. */
#define BUF_GET_POSSIBLY_FREED 16
/*!< Like BUF_GET, but do not mind
if the file page has been freed. */
@@ -194,11 +190,9 @@ buf_page_t *buf_page_get_zip(const page_id_t page_id, ulint zip_size);
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, or BUF_GET_IF_IN_POOL_OR_WATCH
+or BUF_PEEK_IF_IN_POOL
@param[in,out] mtr mini-transaction
@param[out] err DB_SUCCESS or error code
-@param[in] allow_ibuf_merge Allow change buffer merge while
-reading the pages from file.
@return pointer to the block or NULL */
buf_block_t*
buf_page_get_gen(
@@ -208,9 +202,8 @@ buf_page_get_gen(
buf_block_t* guess,
ulint mode,
mtr_t* mtr,
- dberr_t* err = NULL,
- bool allow_ibuf_merge = false)
- MY_ATTRIBUTE((nonnull(6), warn_unused_result));
+ dberr_t* err = NULL)
+ MY_ATTRIBUTE((nonnull(6)));
/** This is the low level function used to get access to a database page.
@param[in] page_id page id
@@ -218,14 +211,10 @@ buf_page_get_gen(
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, or BUF_GET_IF_IN_POOL_OR_WATCH
+or BUF_PEEK_IF_IN_POOL
@param[in,out] mtr mini-transaction, or NULL if a
block with page_id is to be evicted
@param[out] err DB_SUCCESS or error code
-@param[in] allow_ibuf_merge Allow change buffer merge to happen
-while reading the page from file
-then it makes sure that it does merging of change buffer changes while
-reading the page from file.
@return pointer to the block or NULL */
buf_block_t*
buf_page_get_low(
@@ -235,8 +224,7 @@ buf_page_get_low(
buf_block_t* guess,
ulint mode,
mtr_t* mtr,
- dberr_t* err,
- bool allow_ibuf_merge);
+ dberr_t* err);
/** Initialize a page in the buffer pool. The page is usually not read
from a file even if it cannot be found in the buffer buf_pool. This is one
@@ -539,18 +527,16 @@ public:
static constexpr uint32_t REMOVE_HASH= 2;
/** smallest state() of a buffer page that is freed in the tablespace */
static constexpr uint32_t FREED= 3;
+ /* unused state: 1U<<29 */
/** smallest state() for a block that belongs to buf_pool.LRU */
- static constexpr uint32_t UNFIXED= 1U << 29;
- /** smallest state() of a block for which buffered changes may exist */
- static constexpr uint32_t IBUF_EXIST= 2U << 29;
+ static constexpr uint32_t UNFIXED= 2U << 29;
/** smallest state() of a (re)initialized page (no doublewrite needed) */
static constexpr uint32_t REINIT= 3U << 29;
/** smallest state() for an io-fixed block */
static constexpr uint32_t READ_FIX= 4U << 29;
+ /* unused state: 5U<<29 */
/** smallest state() for a write-fixed block */
- static constexpr uint32_t WRITE_FIX= 5U << 29;
- /** smallest state() for a write-fixed block with buffered changes */
- static constexpr uint32_t WRITE_FIX_IBUF= 6U << 29;
+ static constexpr uint32_t WRITE_FIX= 6U << 29;
/** smallest state() for a write-fixed block (no doublewrite was used) */
static constexpr uint32_t WRITE_FIX_REINIT= 7U << 29;
/** buf_pool.LRU status mask in state() */
@@ -562,8 +548,7 @@ public:
byte *frame;
/* @} */
/** ROW_FORMAT=COMPRESSED page; zip.data (but not the data it points to)
- is also protected by buf_pool.mutex;
- !frame && !zip.data means an active buf_pool.watch */
+ is also protected by buf_pool.mutex */
page_zip_des_t zip;
#ifdef UNIV_DEBUG
/** whether this->list is in buf_pool.zip_hash; protected by buf_pool.mutex */
@@ -696,13 +681,6 @@ public:
bool is_freed() const
{ const auto s= state(); ut_ad(s >= FREED); return s < UNFIXED; }
- bool is_ibuf_exist() const
- {
- const auto s= state();
- ut_ad(s >= UNFIXED);
- ut_ad(s < READ_FIX);
- return (s & LRU_MASK) == IBUF_EXIST;
- }
bool is_reinit() const { return !(~state() & REINIT); }
void set_reinit(uint32_t prev_state)
@@ -713,29 +691,10 @@ public:
ut_ad(s < prev_state + UNFIXED);
}
- void set_ibuf_exist()
- {
- ut_ad(lock.is_write_locked());
- ut_ad(id() < page_id_t(SRV_SPACE_ID_UPPER_BOUND, 0));
- const auto s= state();
- ut_ad(s >= UNFIXED);
- ut_ad(s < READ_FIX);
- ut_ad(s < IBUF_EXIST || s >= REINIT);
- zip.fix.fetch_add(IBUF_EXIST - (LRU_MASK & s));
- }
- void clear_ibuf_exist()
- {
- ut_ad(lock.is_write_locked());
- ut_ad(id() < page_id_t(SRV_SPACE_ID_UPPER_BOUND, 0));
- ut_d(const auto s=) zip.fix.fetch_sub(IBUF_EXIST - UNFIXED);
- ut_ad(s >= IBUF_EXIST);
- ut_ad(s < REINIT);
- }
-
uint32_t read_unfix(uint32_t s)
{
ut_ad(lock.is_write_locked());
- ut_ad(s == UNFIXED + 1 || s == IBUF_EXIST + 1 || s == REINIT + 1);
+ ut_ad(s == UNFIXED + 1 || s == REINIT + 1);
uint32_t old_state= zip.fix.fetch_add(s - READ_FIX);
ut_ad(old_state >= READ_FIX);
ut_ad(old_state < WRITE_FIX);
@@ -824,7 +783,7 @@ public:
uint32_t fix(uint32_t count= 1)
{
ut_ad(count);
- ut_ad(count < IBUF_EXIST);
+ ut_ad(count < REINIT);
uint32_t f= zip.fix.fetch_add(count);
ut_ad(f >= FREED);
ut_ad(!((f ^ (f + 1)) & LRU_MASK));
@@ -1424,78 +1383,10 @@ public:
public:
/** @return whether the buffer pool contains a page
- @tparam allow_watch whether to allow watch_is_sentinel()
@param page_id page identifier
@param chain hash table chain for page_id.fold() */
- template<bool allow_watch= false>
- TRANSACTIONAL_INLINE
- bool page_hash_contains(const page_id_t page_id, hash_chain &chain)
- {
- transactional_shared_lock_guard<page_hash_latch> g
- {page_hash.lock_get(chain)};
- buf_page_t *bpage= page_hash.get(page_id, chain);
- if (bpage >= &watch[0] && bpage < &watch[UT_ARR_SIZE(watch)])
- {
- ut_ad(!bpage->in_zip_hash);
- ut_ad(!bpage->zip.data);
- if (!allow_watch)
- bpage= nullptr;
- }
- return bpage;
- }
-
- /** Determine if a block is a sentinel for a buffer pool watch.
- @param bpage page descriptor
- @return whether bpage a sentinel for a buffer pool watch */
- bool watch_is_sentinel(const buf_page_t &bpage)
- {
-#ifdef SAFE_MUTEX
- DBUG_ASSERT(mysql_mutex_is_owner(&mutex) ||
- page_hash.lock_get(page_hash.cell_get(bpage.id().fold())).
- is_locked());
-#endif /* SAFE_MUTEX */
- ut_ad(bpage.in_file());
- if (&bpage < &watch[0] || &bpage >= &watch[array_elements(watch)])
- return false;
- ut_ad(!bpage.in_zip_hash);
- ut_ad(!bpage.zip.data);
- return true;
- }
-
- /** Check if a watched page has been read.
- This may only be called after !watch_set() and before invoking watch_unset().
- @param id page identifier
- @return whether the page was read to the buffer pool */
- TRANSACTIONAL_INLINE
- bool watch_occurred(const page_id_t id)
- {
- hash_chain &chain= page_hash.cell_get(id.fold());
- transactional_shared_lock_guard<page_hash_latch> g
- {page_hash.lock_get(chain)};
- /* The page must exist because watch_set() increments buf_fix_count. */
- return !watch_is_sentinel(*page_hash.get(id, chain));
- }
-
- /** Register a watch for a page identifier.
- @param id page identifier
- @param chain page_hash.cell_get(id.fold())
- @return a buffer page corresponding to id
- @retval nullptr if the block was not present in page_hash */
- buf_page_t *watch_set(const page_id_t id, hash_chain &chain);
-
- /** Stop watching whether a page has been read in.
- watch_set(id) must have returned nullptr before.
- @param id page identifier
- @param chain unlocked hash table chain */
- void watch_unset(const page_id_t id, hash_chain &chain);
-
- /** Remove the sentinel block for the watch before replacing it with a
- real block. watch_unset() or watch_occurred() will notice
- that the block has been replaced with the real block.
- @param w sentinel
- @param chain locked hash table chain
- @return w->state() */
- inline uint32_t watch_remove(buf_page_t *w, hash_chain &chain);
+ TRANSACTIONAL_TARGET
+ bool page_hash_contains(const page_id_t page_id, hash_chain &chain);
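A hedged usage sketch for the now out-of-line page_hash_contains(): the cell_get()/fold() calls mirror the removed inline code above, and the wrapper name page_is_cached_sketch is invented for illustration.

/* Illustration only (assumes buf0buf.h): check whether a page is cached,
   without the allow_watch option that the removed inline version offered. */
static bool page_is_cached_sketch(const page_id_t id)
{
  buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(id.fold());
  return buf_pool.page_hash_contains(id, chain);
}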
/** @return whether less than 1/4 of the buffer pool is available */
TPOOL_SUPPRESS_TSAN
@@ -1882,9 +1773,6 @@ public:
# error "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN"
#endif
- /** Sentinels to detect if pages are read into the buffer pool while
- a delete-buffering operation is pending. Protected by mutex. */
- buf_page_t watch[innodb_purge_threads_MAX + 1];
/** Reserve a buffer. */
buf_tmp_buffer_t *io_buf_reserve() { return io_buf.reserve(); }
diff --git a/storage/innobase/include/buf0buf.inl b/storage/innobase/include/buf0buf.inl
index 4516a24803c..24f7352ca4c 100644
--- a/storage/innobase/include/buf0buf.inl
+++ b/storage/innobase/include/buf0buf.inl
@@ -90,7 +90,7 @@ inline bool buf_page_peek_if_too_old(const buf_page_t *bpage)
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
inline buf_block_t *buf_block_alloc()
{
- return buf_LRU_get_free_block(false);
+ return buf_LRU_get_free_block(have_no_mutex);
}
/********************************************************************//**
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index aec08e77f54..d8ce8333eb1 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -62,6 +62,17 @@ bool buf_LRU_scan_and_free_block(ulint limit= ULINT_UNDEFINED);
@retval NULL if the free list is empty */
buf_block_t* buf_LRU_get_free_only();
+/** How to acquire a block */
+enum buf_LRU_get {
+ /** The caller is not holding buf_pool.mutex */
+ have_no_mutex= 0,
+ /** The caller is holding buf_pool.mutex */
+ have_mutex,
+ /** The caller is not holding buf_pool.mutex and is OK if a block
+ cannot be allocated. */
+ have_no_mutex_soft
+};
+
/** Get a block from the buf_pool.free list.
If the list is empty, blocks will be moved from the end of buf_pool.LRU
to buf_pool.free.
@@ -83,9 +94,10 @@ we put it to free list to be used.
* scan whole LRU list
* scan LRU list even if buf_pool.try_LRU_scan is not set
-@param have_mutex whether buf_pool.mutex is already being held
-@return the free control block, in state BUF_BLOCK_MEMORY */
-buf_block_t* buf_LRU_get_free_block(bool have_mutex)
+@param get how to allocate the block
+@return the free control block, in state BUF_BLOCK_MEMORY
+@retval nullptr if get==have_no_mutex_soft and memory was not available */
+buf_block_t* buf_LRU_get_free_block(buf_LRU_get get)
MY_ATTRIBUTE((malloc,warn_unused_result));
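A hedged sketch of the new soft mode: unlike the other two modes, the return value must be checked for nullptr, per the @retval above. The function name try_alloc_block_sketch is invented for illustration.

/* Illustration only (assumes buf0lru.h): the soft mode returns nullptr
   instead of waiting when the buffer pool has no block to spare. */
static buf_block_t *try_alloc_block_sketch()
{
  buf_block_t *block= buf_LRU_get_free_block(have_no_mutex_soft);
  if (!block)
  {
    /* No memory was available; the caller may retry or degrade gracefully. */
  }
  return block;  /* on success, the block is in state BUF_BLOCK_MEMORY */
}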
/** @return whether the unzip_LRU list should be used for evicting a victim
diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h
index 4ec8938c689..ebf0f60ffe5 100644
--- a/storage/innobase/include/buf0rea.h
+++ b/storage/innobase/include/buf0rea.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2021, MariaDB Corporation.
+Copyright (c) 2015, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,8 +24,7 @@ The database buffer read
Created 11/5/1995 Heikki Tuuri
*******************************************************/
-#ifndef buf0rea_h
-#define buf0rea_h
+#pragma once
#include "buf0buf.h"
@@ -33,15 +32,17 @@ Created 11/5/1995 Heikki Tuuri
buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread.
-@param page_id page id
-@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@retval DB_SUCCESS if the page was read and is not corrupted
+@param page_id page id
+@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param chain buf_pool.page_hash cell for page_id
+@retval DB_SUCCESS if the page was read and is not corrupted,
@retval DB_SUCCESS_LOCKED_REC if the page was not read
-@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted
+@retval DB_PAGE_CORRUPTED if the page is corrupted according to its checksum,
@retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
after decryption normal page checksum does not match.
@retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
-dberr_t buf_read_page(const page_id_t page_id, ulint zip_size);
+dberr_t buf_read_page(const page_id_t page_id, ulint zip_size,
+ buf_pool_t::hash_chain &chain);
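Since buf_read_page() now takes the page_hash cell explicitly, the caller is expected to look it up first. A hedged sketch follows; the identifiers are taken from this header and from the buf_pool code elsewhere in this diff, and the wrapper name is invented.

/* Illustration only (assumes buf0buf.h and buf0rea.h): synchronously read
   one page, passing the page_hash cell that the caller has looked up. */
static dberr_t read_one_page_sketch(const page_id_t page_id, ulint zip_size)
{
  buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
  dberr_t err= buf_read_page(page_id, zip_size, chain);
  if (err != DB_SUCCESS && err != DB_SUCCESS_LOCKED_REC)
  {
    /* DB_PAGE_CORRUPTED, DB_DECRYPTION_FAILED or DB_TABLESPACE_DELETED */
  }
  return err;
}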
/** High-level function which reads a page asynchronously from a file to the
buffer buf_pool if it is not already there. Sets the io_fix flag and sets
@@ -57,21 +58,14 @@ void buf_read_page_background(fil_space_t *space, const page_id_t page_id,
/** Applies a random read-ahead in buf_pool if there are at least a threshold
value of accessed pages from the random read-ahead area. Does not read any
page, not even the one at the position (space, offset), if the read-ahead
-mechanism is not activated. NOTE 1: the calling thread may own latches on
+mechanism is not activated. NOTE: the calling thread may own latches on
pages: to avoid deadlocks this function must be written such that it cannot
-end up waiting for these latches! NOTE 2: the calling thread must want
-access to the page given: this rule is set to prevent unintended read-aheads
-performed by ibuf routines, a situation which could result in a deadlock if
-the OS does not support asynchronous i/o.
+end up waiting for these latches!
@param[in] page_id page id of a page which the current thread
wants to access
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] ibuf whether we are inside ibuf routine
-@return number of page read requests issued; NOTE that if we read ibuf
-pages, it may happen that the page at the given page number does not
-get read even if we return a positive value! */
-ulint
-buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf);
+@return number of page read requests issued */
+ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size);
/** Applies linear read-ahead if in the buf_pool the page is a border page of
a linear read-ahead area and all the pages in the area have been accessed.
@@ -92,26 +86,12 @@ only very improbably.
NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this
function must be written such that it cannot end up waiting for these
latches!
-NOTE 3: the calling thread must want access to the page given: this rule is
-set to prevent unintended read-aheads performed by ibuf routines, a situation
-which could result in a deadlock if the OS does not support asynchronous io.
@param[in] page_id page id; see NOTE 3 above
@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] ibuf whether if we are inside ibuf routine
@return number of page read requests issued */
-ulint
-buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf);
+ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size);
/** Issue read requests for pages that need to be recovered.
@param space_id tablespace identifier
@param page_nos page numbers to read, in ascending order */
void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos);
-
-/** @name Modes used in read-ahead @{ */
-/** read only pages belonging to the insert buffer tree */
-#define BUF_READ_IBUF_PAGES_ONLY 131
-/** read any page */
-#define BUF_READ_ANY_PAGE 132
-/* @} */
-
-#endif
diff --git a/storage/innobase/include/data0type.h b/storage/innobase/include/data0type.h
index 3d63ddb767c..d4885186087 100644
--- a/storage/innobase/include/data0type.h
+++ b/storage/innobase/include/data0type.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,7 +33,6 @@ Created 1/16/1996 Heikki Tuuri
/** @return whether a length is actually stored in a field */
#define len_is_stored(len) (len != UNIV_SQL_NULL && len != UNIV_SQL_DEFAULT)
-extern ulint data_mysql_default_charset_coll;
#define DATA_MYSQL_BINARY_CHARSET_COLL 63
/* SQL data type struct */
@@ -196,14 +195,6 @@ constexpr uint8_t DATA_MBR_LEN= uint8_t(SPDIMS * 2 * sizeof(double));
/*-------------------------------------------*/
-/* This many bytes we need to store the type information affecting the
-alphabetical order for a single field and decide the storage size of an
-SQL null*/
-#define DATA_ORDER_NULL_TYPE_BUF_SIZE 4
-/* In the >= 4.1.x storage format we add 2 bytes more so that we can also
-store the charset-collation number; one byte is left unused, though */
-#define DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE 6
-
/* Maximum multi-byte character length in bytes, plus 1 */
#define DATA_MBMAX 8
@@ -344,13 +335,11 @@ charset-collation code.
DATA_BINARY_TYPE etc.
@param[in] charset_coll character-set collation code
@return precise type, including the charset-collation code */
-UNIV_INLINE
-uint32_t
-dtype_form_prtype(ulint old_prtype, ulint charset_coll)
+inline uint32_t dtype_form_prtype(ulint old_prtype, ulint charset_coll)
{
- ut_ad(old_prtype < 256 * 256);
- ut_ad(charset_coll <= MAX_CHAR_COLL_NUM);
- return(uint32_t(old_prtype + (charset_coll << 16)));
+ ut_ad(old_prtype <= 0xffff);
+ ut_ad(charset_coll <= MAX_CHAR_COLL_NUM);
+ return uint32_t(old_prtype | (charset_coll << 16));
}
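A standalone arithmetic check of the layout that the simplified function above computes: the original prtype stays in the low 16 bits and the collation code is placed above it. The concrete values below are made up for illustration.

#include <cassert>
#include <cstdint>

int main()
{
  const uint32_t old_prtype= 0x040c;   /* some flags; must fit in 16 bits */
  const uint32_t charset_coll= 45;     /* an example collation number */
  const uint32_t prtype= old_prtype | (charset_coll << 16);
  assert((prtype & 0xffff) == old_prtype);  /* low 16 bits: original prtype */
  assert((prtype >> 16) == charset_coll);   /* upper bits: collation code */
  return 0;
}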
/*********************************************************************//**
@@ -439,40 +428,6 @@ dtype_get_sql_null_size(
const dtype_t* type, /*!< in: type */
ulint comp); /*!< in: nonzero=ROW_FORMAT=COMPACT */
-/**********************************************************************//**
-Reads to a type the stored information which determines its alphabetical
-ordering and the storage size of an SQL NULL value. */
-UNIV_INLINE
-void
-dtype_read_for_order_and_null_size(
-/*===============================*/
- dtype_t* type, /*!< in: type struct */
- const byte* buf); /*!< in: buffer for the stored order info */
-/**********************************************************************//**
-Stores for a type the information which determines its alphabetical ordering
-and the storage size of an SQL NULL value. This is the >= 4.1.x storage
-format. */
-UNIV_INLINE
-void
-dtype_new_store_for_order_and_null_size(
-/*====================================*/
- byte* buf, /*!< in: buffer for
- DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE
- bytes where we store the info */
- const dtype_t* type, /*!< in: type struct */
- ulint prefix_len);/*!< in: prefix length to
- replace type->len, or 0 */
-/**********************************************************************//**
-Reads to a type the stored information which determines its alphabetical
-ordering and the storage size of an SQL NULL value. This is the 4.1.x storage
-format. */
-UNIV_INLINE
-void
-dtype_new_read_for_order_and_null_size(
-/*===================================*/
- dtype_t* type, /*!< in: type struct */
- const byte* buf); /*!< in: buffer for stored type order info */
-
/*********************************************************************//**
Validates a data type structure.
@return TRUE if ok */
@@ -494,8 +449,6 @@ struct dict_col_t;
If you add fields to this structure, be sure to initialize them everywhere.
This structure is initialized in the following functions:
dtype_set()
-dtype_read_for_order_and_null_size()
-dtype_new_read_for_order_and_null_size()
sym_tab_add_null_lit() */
struct dtype_t{
diff --git a/storage/innobase/include/data0type.inl b/storage/innobase/include/data0type.inl
index 329cee5d190..add6c211bb9 100644
--- a/storage/innobase/include/data0type.inl
+++ b/storage/innobase/include/data0type.inl
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -183,126 +183,6 @@ dtype_get_mbmaxlen(
return type->mbmaxlen;
}
-/**********************************************************************//**
-Stores for a type the information which determines its alphabetical ordering
-and the storage size of an SQL NULL value. This is the >= 4.1.x storage
-format. */
-UNIV_INLINE
-void
-dtype_new_store_for_order_and_null_size(
-/*====================================*/
- byte* buf, /*!< in: buffer for
- DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE
- bytes where we store the info */
- const dtype_t* type, /*!< in: type struct */
- ulint prefix_len)/*!< in: prefix length to
- replace type->len, or 0 */
-{
- compile_time_assert(6 == DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE);
-
- ulint len;
-
- ut_ad(type);
- ut_ad(type->mtype >= DATA_VARCHAR);
- ut_ad(type->mtype <= DATA_MTYPE_MAX);
-
- buf[0] = (byte)(type->mtype & 0xFFUL);
-
- if (type->prtype & DATA_BINARY_TYPE) {
- buf[0] |= 128;
- }
-
- /* In versions < 4.1.2 we had: if (type->prtype & DATA_NONLATIN1) {
- buf[0] |= 64;
- }
- */
-
- buf[1] = (byte)(type->prtype & 0xFFUL);
-
- len = prefix_len ? prefix_len : type->len;
-
- mach_write_to_2(buf + 2, len & 0xFFFFUL);
-
- ut_ad(dtype_get_charset_coll(type->prtype) <= MAX_CHAR_COLL_NUM);
- mach_write_to_2(buf + 4, dtype_get_charset_coll(type->prtype));
-
- if (type->prtype & DATA_NOT_NULL) {
- buf[4] |= 128;
- }
-}
-
-/**********************************************************************//**
-Reads to a type the stored information which determines its alphabetical
-ordering and the storage size of an SQL NULL value. This is the < 4.1.x
-storage format. */
-UNIV_INLINE
-void
-dtype_read_for_order_and_null_size(
-/*===============================*/
- dtype_t* type, /*!< in: type struct */
- const byte* buf) /*!< in: buffer for stored type order info */
-{
- compile_time_assert(4 == DATA_ORDER_NULL_TYPE_BUF_SIZE);
- type->mtype = buf[0] & 63;
- type->prtype = buf[1];
-
- if (buf[0] & 128) {
- type->prtype |= DATA_BINARY_TYPE;
- }
-
- type->len = mach_read_from_2(buf + 2);
-
- type->prtype = dtype_form_prtype(type->prtype,
- data_mysql_default_charset_coll);
- dtype_set_mblen(type);
-}
-
-/**********************************************************************//**
-Reads to a type the stored information which determines its alphabetical
-ordering and the storage size of an SQL NULL value. This is the >= 4.1.x
-storage format. */
-UNIV_INLINE
-void
-dtype_new_read_for_order_and_null_size(
-/*===================================*/
- dtype_t* type, /*!< in: type struct */
- const byte* buf) /*!< in: buffer for stored type order info */
-{
- compile_time_assert(6 == DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE);
-
- type->mtype = buf[0] & 63;
- type->prtype = buf[1];
-
- if (buf[0] & 128) {
- type->prtype |= DATA_BINARY_TYPE;
- }
-
- if (buf[4] & 128) {
- type->prtype |= DATA_NOT_NULL;
- }
-
- type->len = mach_read_from_2(buf + 2);
-
- ulint charset_coll = mach_read_from_2(buf + 4) & CHAR_COLL_MASK;
-
- if (dtype_is_string_type(type->mtype)) {
- ut_a(charset_coll <= MAX_CHAR_COLL_NUM);
-
- if (charset_coll == 0) {
- /* This insert buffer record was inserted with MySQL
- version < 4.1.2, and the charset-collation code was not
- explicitly stored to dtype->prtype at that time. It
- must be the default charset-collation of this MySQL
- installation. */
-
- charset_coll = data_mysql_default_charset_coll;
- }
-
- type->prtype = dtype_form_prtype(type->prtype, charset_coll);
- }
- dtype_set_mblen(type);
-}
-
/***********************************************************************//**
Returns the size of a fixed size data type, 0 if not a fixed size type.
@return fixed size, or 0 */
diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h
index a65287476ef..68400d2095d 100644
--- a/storage/innobase/include/dict0boot.h
+++ b/storage/innobase/include/dict0boot.h
@@ -44,39 +44,6 @@ dict_hdr_get_new_id(
(not assigned if NULL) */
uint32_t* space_id); /*!< out: space id
(not assigned if NULL) */
-/** Update dict_sys.row_id in the dictionary header file page. */
-void dict_hdr_flush_row_id(row_id_t id);
-/** @return A new value for GEN_CLUST_INDEX(DB_ROW_ID) */
-inline row_id_t dict_sys_t::get_new_row_id()
-{
- row_id_t id= row_id.fetch_add(1);
- if (!(id % ROW_ID_WRITE_MARGIN))
- dict_hdr_flush_row_id(id);
- return id;
-}
-
-/** Ensure that row_id is not smaller than id, on IMPORT TABLESPACE */
-inline void dict_sys_t::update_row_id(row_id_t id)
-{
- row_id_t sys_id= row_id;
- while (id >= sys_id)
- {
- if (!row_id.compare_exchange_strong(sys_id, id))
- continue;
- if (!(id % ROW_ID_WRITE_MARGIN))
- dict_hdr_flush_row_id(id);
- break;
- }
-}
-
-/**********************************************************************//**
-Writes a row id to a record or other 6-byte stored form. */
-inline void dict_sys_write_row_id(byte *field, row_id_t row_id)
-{
- static_assert(DATA_ROW_ID_LEN == 6, "compatibility");
- mach_write_to_6(field, row_id);
-}
-
/*****************************************************************//**
Initializes the data dictionary memory structures when the database is
started. This function is also called when the data dictionary is created.
@@ -116,7 +83,7 @@ inline bool dict_is_sys_table(table_id_t id) { return id < DICT_HDR_FIRST_ID; }
/*-------------------------------------------------------------*/
/* Dictionary header offsets */
-#define DICT_HDR_ROW_ID 0 /* The latest assigned row id */
+//#define DICT_HDR_ROW_ID 0 /* Was: latest assigned DB_ROW_ID */
#define DICT_HDR_TABLE_ID 8 /* The latest assigned table id */
#define DICT_HDR_INDEX_ID 16 /* The latest assigned index id */
#define DICT_HDR_MAX_SPACE_ID 24 /* The latest assigned space id,or 0*/
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 8daa07160a3..628ad8366af 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -634,8 +634,6 @@ dict_table_get_next_index(
#define dict_index_is_auto_gen_clust(index) (index)->is_gen_clust()
#define dict_index_is_unique(index) (index)->is_unique()
#define dict_index_is_spatial(index) (index)->is_spatial()
-#define dict_index_is_ibuf(index) (index)->is_ibuf()
-#define dict_index_is_sec_or_ibuf(index) !(index)->is_primary()
#define dict_index_has_virtual(index) (index)->has_virtual()
/** Get all the FTS indexes on a table.
@@ -650,7 +648,7 @@ dict_table_get_all_fts_indexes(
/********************************************************************//**
Gets the number of user-defined non-virtual columns in a table in the
dictionary cache.
-@return number of user-defined (e.g., not ROW_ID) non-virtual
+@return number of user-defined (e.g., not DB_ROW_ID) non-virtual
columns of a table */
UNIV_INLINE
unsigned
@@ -1372,27 +1370,10 @@ private:
std::atomic<table_id_t> temp_table_id{DICT_HDR_FIRST_ID};
/** hash table of temporary table IDs */
hash_table_t temp_id_hash;
- /** the next value of DB_ROW_ID, backed by DICT_HDR_ROW_ID
- (FIXME: remove this, and move to dict_table_t) */
- Atomic_relaxed<row_id_t> row_id;
- /** The synchronization interval of row_id */
- static constexpr size_t ROW_ID_WRITE_MARGIN= 256;
public:
/** Diagnostic message for exceeding the lock_wait() timeout */
static const char fatal_msg[];
- /** @return A new value for GEN_CLUST_INDEX(DB_ROW_ID) */
- inline row_id_t get_new_row_id();
-
- /** Ensure that row_id is not smaller than id, on IMPORT TABLESPACE */
- inline void update_row_id(row_id_t id);
-
- /** Recover the global DB_ROW_ID sequence on database startup */
- void recover_row_id(row_id_t id)
- {
- row_id= ut_uint64_align_up(id, ROW_ID_WRITE_MARGIN) + ROW_ID_WRITE_MARGIN;
- }
-
/** @return a new temporary table ID */
table_id_t acquire_temporary_table_id()
{
diff --git a/storage/innobase/include/dict0dict.inl b/storage/innobase/include/dict0dict.inl
index 4cc3eae96ab..ead22a21757 100644
--- a/storage/innobase/include/dict0dict.inl
+++ b/storage/innobase/include/dict0dict.inl
@@ -244,7 +244,7 @@ dict_table_get_next_index(
/********************************************************************//**
Gets the number of user-defined non-virtual columns in a table in the
dictionary cache.
-@return number of user-defined (e.g., not ROW_ID) non-virtual
+@return number of user-defined (e.g., not DB_ROW_ID) non-virtual
columns of a table */
UNIV_INLINE
unsigned
diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h
index f7d33d5b43b..bd55848a776 100644
--- a/storage/innobase/include/dict0load.h
+++ b/storage/innobase/include/dict0load.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,9 +39,7 @@ Created 4/24/1996 Heikki Tuuri
/** A stack of table names related through foreign key constraints */
typedef std::deque<const char*, ut_allocator<const char*> > dict_names_t;
-/** Check each tablespace found in the data dictionary.
-Then look at each table defined in SYS_TABLES that has a space_id > 0
-to find all the file-per-table tablespaces.
+/** Open each tablespace found in the data dictionary.
In a crash recovery we already have some tablespace objects created from
processing the REDO log. We will compare the
@@ -50,7 +48,7 @@ tablespace file. In addition, more validation will be done if recovery
was needed and force_recovery is not set.
We also scan the biggest space id, and store it to fil_system. */
-void dict_check_tablespaces_and_store_max_id();
+void dict_load_tablespaces();
/** Make sure the data_file_name is saved in dict_table_t if needed.
@param[in,out] table Table object */
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index c76262ff5be..bbbda57b05d 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -64,7 +64,6 @@ combination of types */
auto-generated clustered indexes,
also DICT_UNIQUE will be set */
#define DICT_UNIQUE 2 /*!< unique index */
-#define DICT_IBUF 8 /*!< insert buffer tree */
#define DICT_CORRUPT 16 /*!< bit to store the corrupted flag
in SYS_INDEXES.TYPE */
#define DICT_FTS 32 /* FTS index; can't be combined with the
@@ -995,7 +994,7 @@ struct dict_index_t {
# define DICT_INDEX_MERGE_THRESHOLD_DEFAULT 50
unsigned type:DICT_IT_BITS;
/*!< index type (DICT_CLUSTERED, DICT_UNIQUE,
- DICT_IBUF, DICT_CORRUPT) */
+ DICT_CORRUPT) */
#define MAX_KEY_LENGTH_BITS 12
unsigned trx_id_offset:MAX_KEY_LENGTH_BITS;
/*!< position of the trx id column
@@ -1184,12 +1183,8 @@ public:
/** @return whether instant ALTER TABLE is in effect */
inline bool is_instant() const;
- /** @return whether the index is the primary key index
- (not the clustered index of the change buffer) */
- bool is_primary() const
- {
- return DICT_CLUSTERED == (type & (DICT_CLUSTERED | DICT_IBUF));
- }
+ /** @return whether the index is the primary key index */
+ bool is_primary() const { return is_clust(); }
/** @return whether this is a generated clustered index */
bool is_gen_clust() const { return type == DICT_CLUSTERED; }
@@ -1203,16 +1198,13 @@ public:
/** @return whether this is a spatial index */
bool is_spatial() const { return UNIV_UNLIKELY(type & DICT_SPATIAL); }
- /** @return whether this is the change buffer */
- bool is_ibuf() const { return UNIV_UNLIKELY(type & DICT_IBUF); }
-
/** @return whether this index requires locking */
- bool has_locking() const { return !is_ibuf(); }
+ static constexpr bool has_locking() { return true; }
/** @return whether this is a normal B-tree index
(not the change buffer, not SPATIAL or FULLTEXT) */
bool is_btree() const {
- return UNIV_LIKELY(!(type & (DICT_IBUF | DICT_SPATIAL
+ return UNIV_LIKELY(!(type & (DICT_SPATIAL
| DICT_FTS | DICT_CORRUPT)));
}
@@ -2355,6 +2347,8 @@ private:
Atomic_relaxed<pthread_t> lock_mutex_owner{0};
#endif
public:
+ /** The next DB_ROW_ID value */
+ Atomic_counter<uint64_t> row_id{0};
/** Autoinc counter value to give to the next inserted row. */
uint64_t autoinc;
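With the global dict_sys sequence gone (see the dict0boot.h and dict0dict.h hunks above), DB_ROW_ID becomes a per-table counter. A standalone sketch of that direction, using std::atomic in place of Atomic_counter; how the value is persisted is not covered by this header, so nothing below should be read as the actual assignment logic.

#include <atomic>
#include <cstdint>

struct table_sketch
{
  std::atomic<uint64_t> row_id{0};  /* analogous to dict_table_t::row_id */
};

/* Hand out the next DB_ROW_ID-like value for one table. */
inline uint64_t next_row_id(table_sketch &t)
{
  return t.row_id.fetch_add(1, std::memory_order_relaxed);
}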
diff --git a/storage/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h
index ec50e8cd951..f6169227433 100644
--- a/storage/innobase/include/dict0types.h
+++ b/storage/innobase/include/dict0types.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,10 +48,6 @@ struct dict_add_v_col_t;
#define DICT_HDR_SPACE 0 /* the SYSTEM tablespace */
#define DICT_HDR_PAGE_NO FSP_DICT_HDR_PAGE_NO
-/* The ibuf table and indexes's ID are assigned as the number
-DICT_IBUF_ID_MIN plus the space id */
-#define DICT_IBUF_ID_MIN 0xFFFFFFFF00000000ULL
-
typedef ib_id_t table_id_t;
typedef ib_id_t index_id_t;
@@ -136,13 +132,6 @@ struct table_name_t
inline bool is_temporary() const;
};
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
-/** Dump the change buffer at startup */
-extern my_bool ibuf_dump;
-/** Flag to control insert buffer debugging. */
-extern uint ibuf_debug;
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-
/** Shift for spatial status */
#define SPATIAL_STATUS_SHIFT 12
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 483d594c6b9..f53279ecb88 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -51,35 +51,6 @@ using space_list_t= ilist<fil_space_t, space_list_tag_t>;
// Forward declaration
extern my_bool srv_use_doublewrite_buf;
-/** Possible values of innodb_flush_method */
-enum srv_flush_t
-{
- /** fsync, the default */
- SRV_FSYNC= 0,
- /** open log files in O_DSYNC mode */
- SRV_O_DSYNC,
- /** do not call os_file_flush() when writing data files, but do flush
- after writing to log files */
- SRV_LITTLESYNC,
- /** do not flush after writing */
- SRV_NOSYNC,
- /** invoke os_file_set_nocache() on data files. This implies using
- unbuffered I/O but still fdatasync(), because some filesystems might
- not flush meta-data on write completion */
- SRV_O_DIRECT,
- /** Like O_DIRECT, but skip fdatasync(), assuming that the data is
- durable on write completion */
- SRV_O_DIRECT_NO_FSYNC
-#ifdef _WIN32
- /** Traditional Windows appoach to open all files without caching,
- and do FileFlushBuffers() */
- ,SRV_ALL_O_DIRECT_FSYNC
-#endif
-};
-
-/** innodb_flush_method */
-extern ulong srv_file_flush_method;
-
/** Undo tablespaces starts with space_id. */
extern uint32_t srv_undo_space_id_start;
/** The number of UNDO tablespaces that are open and ready to use. */
@@ -631,6 +602,8 @@ private:
}
public:
+ /** Reopen all files on set_write_through() or set_buffered(). */
+ static void reopen_all();
/** Try to close a file to adhere to the innodb_open_files limit.
@param print_info whether to diagnose why a file cannot be closed
@return whether a file was closed */
@@ -1276,11 +1249,11 @@ constexpr uint16_t FIL_PAGE_RTREE= 17854;
constexpr uint16_t FIL_PAGE_UNDO_LOG= 2;
/** Index node (of file-in-file metadata) */
constexpr uint16_t FIL_PAGE_INODE= 3;
-/** Insert buffer free list */
+/** Former change buffer free list */
constexpr uint16_t FIL_PAGE_IBUF_FREE_LIST= 4;
/** Freshly allocated page */
constexpr uint16_t FIL_PAGE_TYPE_ALLOCATED= 0;
-/** Change buffer bitmap (pages n*innodb_page_size+1) */
+/** Former change buffer bitmap pages (pages n*innodb_page_size+1) */
constexpr uint16_t FIL_PAGE_IBUF_BITMAP= 5;
/** System page */
constexpr uint16_t FIL_PAGE_TYPE_SYS= 6;
@@ -1421,6 +1394,20 @@ public:
fil_space_t* temp_space; /*!< The innodb_temporary tablespace */
/** Map of fil_space_t::id to fil_space_t* */
hash_table_t spaces;
+
+ /** whether each write to data files is durable (O_DSYNC) */
+ my_bool write_through;
+ /** whether data files are buffered (not O_DIRECT) */
+ my_bool buffered;
+
+ /** Try to enable or disable write-through of data files */
+ void set_write_through(bool write_through);
+ /** Try to enable or disable file system caching of data files */
+ void set_buffered(bool buffered);
+
+ TPOOL_SUPPRESS_TSAN bool is_write_through() const { return write_through; }
+ TPOOL_SUPPRESS_TSAN bool is_buffered() const { return buffered; }
+
/** tablespaces for which fil_space_t::needs_flush() holds */
sized_ilist<fil_space_t, unflushed_spaces_tag_t> unflushed_spaces;
/** number of currently open files; protected by mutex */
@@ -1578,12 +1565,7 @@ template<bool have_reference> inline void fil_space_t::flush()
mysql_mutex_assert_not_owner(&fil_system.mutex);
ut_ad(!have_reference || (pending() & PENDING));
ut_ad(purpose == FIL_TYPE_TABLESPACE || purpose == FIL_TYPE_IMPORT);
- if (srv_file_flush_method == SRV_O_DIRECT_NO_FSYNC)
- {
- ut_ad(!is_in_unflushed_spaces);
- ut_ad(!needs_flush());
- }
- else if (have_reference)
+ if (have_reference)
flush_low();
else
{
diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h
index 9a23e840380..757ead55d03 100644
--- a/storage/innobase/include/fsp0types.h
+++ b/storage/innobase/include/fsp0types.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2022, MariaDB Corporation.
+Copyright (c) 2014, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -157,28 +157,20 @@ this many file pages */
/* This has been replaced with either srv_page_size or page_zip->size. */
/** @name The space low address page map
-The pages at FSP_XDES_OFFSET and FSP_IBUF_BITMAP_OFFSET are repeated
+The 2 pages at FSP_XDES_OFFSET are repeated
every XDES_DESCRIBED_PER_PAGE pages in every tablespace. */
/* @{ */
/*--------------------------------------*/
#define FSP_XDES_OFFSET 0U /* !< extent descriptor */
-#define FSP_IBUF_BITMAP_OFFSET 1U /* !< insert buffer bitmap */
- /* The ibuf bitmap pages are the ones whose
- page number is the number above plus a
- multiple of XDES_DESCRIBED_PER_PAGE */
-
#define FSP_FIRST_INODE_PAGE_NO 2U /*!< in every tablespace */
/* The following pages exist
in the system tablespace (space 0). */
-#define FSP_IBUF_HEADER_PAGE_NO 3U /*!< insert buffer
+#define FSP_IBUF_HEADER_PAGE_NO 3U /*!< former change buffer
header page, in
tablespace 0 */
-#define FSP_IBUF_TREE_ROOT_PAGE_NO 4U /*!< insert buffer
+#define FSP_IBUF_TREE_ROOT_PAGE_NO 4U /*!< former change buffer
B-tree root page in
tablespace 0 */
- /* The ibuf tree root page number in
- tablespace 0; its fseg inode is on the page
- number FSP_FIRST_INODE_PAGE_NO */
#define FSP_TRX_SYS_PAGE_NO 5U /*!< transaction
system header, in
tablespace 0 */
diff --git a/storage/innobase/include/gis0rtree.h b/storage/innobase/include/gis0rtree.h
index b07261ce042..724a764d848 100644
--- a/storage/innobase/include/gis0rtree.h
+++ b/storage/innobase/include/gis0rtree.h
@@ -62,40 +62,45 @@ Created 2013/03/27 Jimmy Yang and Allen Lai
/** Search for a spatial index leaf page record.
@param cur cursor
+@param thr query thread
@param tuple search tuple
@param latch_mode latching mode
@param mtr mini-transaction
@param mode search mode */
-dberr_t rtr_search_leaf(btr_cur_t *cur, const dtuple_t *tuple,
+dberr_t rtr_search_leaf(btr_cur_t *cur, que_thr_t *thr, const dtuple_t *tuple,
btr_latch_mode latch_mode, mtr_t *mtr,
page_cur_mode_t mode= PAGE_CUR_RTREE_LOCATE)
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((nonnull(1,3,5), warn_unused_result));
/** Search for inserting a spatial index leaf page record.
@param cur cursor
@param tuple search tuple
@param latch_mode latching mode
@param mtr mini-transaction */
-inline dberr_t rtr_insert_leaf(btr_cur_t *cur, const dtuple_t *tuple,
+inline dberr_t rtr_insert_leaf(btr_cur_t *cur, que_thr_t *thr,
+ const dtuple_t *tuple,
btr_latch_mode latch_mode, mtr_t *mtr)
{
- return rtr_search_leaf(cur, tuple, latch_mode, mtr, PAGE_CUR_RTREE_INSERT);
+ return rtr_search_leaf(cur, thr, tuple, latch_mode, mtr,
+ PAGE_CUR_RTREE_INSERT);
}
/** Search for a spatial index leaf page record.
-@param pcur cursor
+@param pcur cursor
+@param thr query thread
@param tuple search tuple
@param mode search mode
@param mtr mini-transaction */
-dberr_t rtr_search_leaf(btr_pcur_t *pcur, const dtuple_t *tuple,
+dberr_t rtr_search_leaf(btr_pcur_t *pcur, que_thr_t *thr,
+ const dtuple_t *tuple,
page_cur_mode_t mode, mtr_t *mtr)
MY_ATTRIBUTE((nonnull, warn_unused_result));
-dberr_t rtr_search_to_nth_level(ulint level, const dtuple_t *tuple,
- page_cur_mode_t mode,
- btr_latch_mode latch_mode,
- btr_cur_t *cur, mtr_t *mtr)
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+dberr_t rtr_search_to_nth_level(btr_cur_t *cur, que_thr_t *thr,
+ const dtuple_t *tuple,
+ btr_latch_mode latch_mode, mtr_t *mtr,
+ page_cur_mode_t mode, ulint level)
+ MY_ATTRIBUTE((nonnull(1,3,5), warn_unused_result));
/**********************************************************************//**
Builds a Rtree node pointer out of a physical record and a page number.
@@ -132,7 +137,29 @@ rtr_page_split_and_insert(
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr, /*!< in: mtr */
- dberr_t* err); /*!< out: error code */
+ dberr_t* err, /*!< out: error code */
+ que_thr_t* thr); /*!< in: query thread */
+
+/*************************************************************//**
+Makes tree one level higher by splitting the root, and inserts the tuple.
+NOTE that the operation of this function must always succeed,
+we cannot reverse it: therefore enough free disk space must be
+guaranteed to be available before this function is called.
+@return inserted record */
+rec_t*
+rtr_root_raise_and_insert(
+ ulint flags, /*!< in: undo logging and locking flags */
+ btr_cur_t* cursor, /*!< in: cursor at which to insert: must be
+ on the root page; when the function returns,
+ the cursor is positioned on the predecessor
+ of the inserted record */
+ rec_offs** offsets,/*!< out: offsets on inserted record */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ const dtuple_t* tuple, /*!< in: tuple to insert */
+ ulint n_ext, /*!< in: number of externally stored columns */
+ mtr_t* mtr, /*!< in: mtr */
+ dberr_t* err, /*!< out: error code */
+ que_thr_t* thr); /*!< in: query thread */
/**************************************************************//**
Sets the child node mbr in a node pointer. */
@@ -243,8 +270,8 @@ rtr_create_rtr_info(
bool init_matches, /*!< in: Whether to initiate the
"matches" structure for collecting
matched leaf records */
- btr_cur_t* cursor, /*!< in: tree search cursor */
- dict_index_t* index); /*!< in: index struct */
+ que_thr_t* thr, /*!< in/out: query thread */
+ btr_cur_t* cursor); /*!< in: tree search cursor */
/********************************************************************//**
Update a btr_cur_t with rtr_info */
@@ -299,8 +326,10 @@ rtr_get_mbr_from_tuple(
about parent nodes in search
@param[in,out] cursor cursor on node pointer record,
its page x-latched
+@param[in,out] thr query thread
@return whether the cursor was successfully positioned */
-bool rtr_page_get_father(mtr_t *mtr, btr_cur_t *sea_cur, btr_cur_t *cursor)
+bool rtr_page_get_father(mtr_t *mtr, btr_cur_t *sea_cur, btr_cur_t *cursor,
+ que_thr_t *thr)
MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
/************************************************************//**
@@ -312,11 +341,12 @@ rtr_page_get_father_block(
/*======================*/
rec_offs* offsets,/*!< in: work area for the return value */
mem_heap_t* heap, /*!< in: memory heap to use */
- mtr_t* mtr, /*!< in: mtr */
btr_cur_t* sea_cur,/*!< in: search cursor, contains information
about parent nodes in search */
- btr_cur_t* cursor);/*!< out: cursor on node pointer record,
+ btr_cur_t* cursor, /*!< out: cursor on node pointer record,
its page x-latched */
+ que_thr_t* thr, /*!< in/out: query thread */
+ mtr_t* mtr); /*!< in/out: mtr */
/**************************************************************//**
Store the parent path cursor
@return number of cursor stored */
@@ -337,6 +367,7 @@ bool rtr_search(
const dtuple_t* tuple, /*!< in: tuple on which search done */
btr_latch_mode latch_mode,/*!< in: BTR_MODIFY_LEAF, ... */
btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
+	que_thr_t*	thr,	/*!< in/out: query thread */
mtr_t* mtr) /*!< in: mtr */
MY_ATTRIBUTE((warn_unused_result));
diff --git a/storage/innobase/include/gis0rtree.inl b/storage/innobase/include/gis0rtree.inl
index 5101eeb6f7a..460496d1978 100644
--- a/storage/innobase/include/gis0rtree.inl
+++ b/storage/innobase/include/gis0rtree.inl
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -240,6 +240,9 @@ rtr_info_reinit_in_cursor(
bool need_prdt) /*!< in: Whether predicate lock is
needed */
{
+ que_thr_t* thr = cursor->rtr_info->thr;
+ ut_ad(thr);
rtr_clean_rtr_info(cursor->rtr_info, false);
rtr_init_rtr_info(cursor->rtr_info, need_prdt, cursor, index, true);
+ cursor->rtr_info->thr = thr;
}
diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index c246b2ef513..d1ff331fe21 100644
--- a/storage/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
@@ -1,7 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2022, MariaDB Corporation.
+Copyright (c) 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -17,420 +16,40 @@ this program; if not, write to the Free Software Foundation, Inc.,
*****************************************************************************/
-/**************************************************//**
-@file include/ibuf0ibuf.h
-Insert buffer
-
-Created 7/19/1997 Heikki Tuuri
-*******************************************************/
-
-#ifndef ibuf0ibuf_h
-#define ibuf0ibuf_h
-
-#include "mtr0mtr.h"
-#include "dict0mem.h"
-#include "fsp0fsp.h"
-
-/** Default value for maximum on-disk size of change buffer in terms
-of percentage of the buffer pool. */
-#define CHANGE_BUFFER_DEFAULT_SIZE (25)
-
-/* Possible operations buffered in the insert/whatever buffer. See
-ibuf_insert(). DO NOT CHANGE THE VALUES OF THESE, THEY ARE STORED ON DISK. */
-typedef enum {
- IBUF_OP_INSERT = 0,
- IBUF_OP_DELETE_MARK = 1,
- IBUF_OP_DELETE = 2,
-
- /* Number of different operation types. */
- IBUF_OP_COUNT = 3
-} ibuf_op_t;
-
-/** Combinations of operations that can be buffered.
-@see innodb_change_buffering_names */
-enum ibuf_use_t {
- IBUF_USE_NONE = 0,
- IBUF_USE_INSERT, /* insert */
- IBUF_USE_DELETE_MARK, /* delete */
- IBUF_USE_INSERT_DELETE_MARK, /* insert+delete */
- IBUF_USE_DELETE, /* delete+purge */
- IBUF_USE_ALL /* insert+delete+purge */
-};
-
-/** Operations that can currently be buffered. */
-extern ulong innodb_change_buffering;
-
-/** Insert buffer struct */
-struct ibuf_t{
- Atomic_relaxed<ulint> size; /*!< current size of the ibuf index
- tree, in pages */
- Atomic_relaxed<ulint> max_size; /*!< recommended maximum size of the
- ibuf index tree, in pages */
- ulint seg_size; /*!< allocated pages of the file
- segment containing ibuf header and
- tree */
- bool empty; /*!< Protected by the page
- latch of the root page of the
- insert buffer tree
- (FSP_IBUF_TREE_ROOT_PAGE_NO). true
- if and only if the insert
- buffer tree is empty. */
- ulint free_list_len; /*!< length of the free list */
- ulint height; /*!< tree height */
- dict_index_t* index; /*!< insert buffer index */
-
- /** number of pages merged */
- Atomic_counter<ulint> n_merges;
- Atomic_counter<ulint> n_merged_ops[IBUF_OP_COUNT];
- /*!< number of operations of each type
- merged to index pages */
- Atomic_counter<ulint> n_discarded_ops[IBUF_OP_COUNT];
- /*!< number of operations of each type
- discarded without merging due to the
- tablespace being deleted or the
- index being dropped */
-};
-
-/** The insert buffer control structure */
-extern ibuf_t ibuf;
-
-/* The purpose of the insert buffer is to reduce random disk access.
-When we wish to insert a record into a non-unique secondary index and
-the B-tree leaf page where the record belongs to is not in the buffer
-pool, we insert the record into the insert buffer B-tree, indexed by
-(space_id, page_no). When the page is eventually read into the buffer
-pool, we look up the insert buffer B-tree for any modifications to the
-page, and apply these upon the completion of the read operation. This
-is called the insert buffer merge. */
-
-/* The insert buffer merge must always succeed. To guarantee this,
-the insert buffer subsystem keeps track of the free space in pages for
-which it can buffer operations. Two bits per page in the insert
-buffer bitmap indicate the available space in coarse increments. The
-free bits in the insert buffer bitmap must never exceed the free space
-on a page. It is safe to decrement or reset the bits in the bitmap in
-a mini-transaction that is committed before the mini-transaction that
-affects the free space. It is unsafe to increment the bits in a
-separately committed mini-transaction, because in crash recovery, the
-free bits could momentarily be set too high. */
-
-/******************************************************************//**
-Creates the insert buffer data structure at a database startup.
-@return DB_SUCCESS or failure */
-dberr_t
-ibuf_init_at_db_start(void);
-/*=======================*/
-/*********************************************************************//**
-Updates the max_size value for ibuf. */
-void
-ibuf_max_size_update(
-/*=================*/
- ulint new_val); /*!< in: new value in terms of
- percentage of the buffer pool size */
-/*********************************************************************//**
-Reads the biggest tablespace id from the high end of the insert buffer
-tree and updates the counter in fil_system. */
-void
-ibuf_update_max_tablespace_id(void);
-/*===============================*/
-/***************************************************************//**
-Starts an insert buffer mini-transaction. */
-UNIV_INLINE
-void
-ibuf_mtr_start(
-/*===========*/
- mtr_t* mtr) /*!< out: mini-transaction */
- MY_ATTRIBUTE((nonnull));
-/***************************************************************//**
-Commits an insert buffer mini-transaction. */
-UNIV_INLINE
-void
-ibuf_mtr_commit(
-/*============*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull));
-/************************************************************************//**
-Resets the free bits of the page in the ibuf bitmap. This is done in a
-separate mini-transaction, hence this operation does not restrict
-further work to only ibuf bitmap operations, which would result if the
-latch to the bitmap page were kept. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is safe
-to decrement or reset the bits in the bitmap in a mini-transaction
-that is committed before the mini-transaction that affects the free
-space. */
-void
-ibuf_reset_free_bits(
-/*=================*/
- buf_block_t* block); /*!< in: index page; free bits are set to 0
- if the index is a non-clustered
- non-unique, and page level is 0 */
-/************************************************************************//**
-Updates the free bits of an uncompressed page in the ibuf bitmap if
-there is not enough free on the page any more. This is done in a
-separate mini-transaction, hence this operation does not restrict
-further work to only ibuf bitmap operations, which would result if the
-latch to the bitmap page were kept. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is
-unsafe to increment the bits in a separately committed
-mini-transaction, because in crash recovery, the free bits could
-momentarily be set too high. It is only safe to use this function for
-decrementing the free bits. Should more free space become available,
-we must not update the free bits here, because that would break crash
-recovery. */
-UNIV_INLINE
-void
-ibuf_update_free_bits_if_full(
-/*==========================*/
- buf_block_t* block, /*!< in: index page to which we have added new
- records; the free bits are updated if the
- index is non-clustered and non-unique and
- the page level is 0, and the page becomes
- fuller */
- ulint max_ins_size,/*!< in: value of maximum insert size with
- reorganize before the latest operation
- performed to the page */
- ulint increase);/*!< in: upper limit for the additional space
- used in the latest operation, if known, or
- ULINT_UNDEFINED */
-/**********************************************************************//**
-Updates the free bits for an uncompressed page to reflect the present
-state. Does this in the mtr given, which means that the latching
-order rules virtually prevent any further operations for this OS
-thread until mtr is committed. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is safe
-to set the free bits in the same mini-transaction that updated the
-page. */
-void
-ibuf_update_free_bits_low(
-/*======================*/
- const buf_block_t* block, /*!< in: index page */
- ulint max_ins_size, /*!< in: value of
- maximum insert size
- with reorganize before
- the latest operation
- performed to the page */
- mtr_t* mtr); /*!< in/out: mtr */
-/**********************************************************************//**
-Updates the free bits for a compressed page to reflect the present
-state. Does this in the mtr given, which means that the latching
-order rules virtually prevent any further operations for this OS
-thread until mtr is committed. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is safe
-to set the free bits in the same mini-transaction that updated the
-page. */
-void
-ibuf_update_free_bits_zip(
-/*======================*/
- buf_block_t* block, /*!< in/out: index page */
- mtr_t* mtr); /*!< in/out: mtr */
-/**********************************************************************//**
-Updates the free bits for the two pages to reflect the present state.
-Does this in the mtr given, which means that the latching order rules
-virtually prevent any further operations until mtr is committed.
-NOTE: The free bits in the insert buffer bitmap must never exceed the
-free space on a page. It is safe to set the free bits in the same
-mini-transaction that updated the pages. */
-void
-ibuf_update_free_bits_for_two_pages_low(
-/*====================================*/
- buf_block_t* block1, /*!< in: index page */
- buf_block_t* block2, /*!< in: index page */
- mtr_t* mtr); /*!< in: mtr */
-/**********************************************************************//**
-A basic partial test if an insert to the insert buffer could be possible and
-recommended. */
-UNIV_INLINE
-ibool
-ibuf_should_try(
-/*============*/
- dict_index_t* index, /*!< in: index where to insert */
- ulint ignore_sec_unique); /*!< in: if != 0, we should
- ignore UNIQUE constraint on
- a secondary index when we
- decide */
-/******************************************************************//**
-Returns TRUE if the current OS thread is performing an insert buffer
-routine.
-
-For instance, a read-ahead of non-ibuf pages is forbidden by threads
-that are executing an insert buffer routine.
-@return TRUE if inside an insert buffer routine */
-UNIV_INLINE
-ibool
-ibuf_inside(
-/*========*/
- const mtr_t* mtr) /*!< in: mini-transaction */
- MY_ATTRIBUTE((warn_unused_result));
-
-/** Checks if a page address is an ibuf bitmap page (level 3 page) address.
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@return TRUE if a bitmap page */
-inline bool ibuf_bitmap_page(const page_id_t page_id, ulint zip_size)
-{
- ut_ad(ut_is_2pow(zip_size));
- ulint size = zip_size ? zip_size : srv_page_size;
- return (page_id.page_no() & (size - 1)) == FSP_IBUF_BITMAP_OFFSET;
-}
-
-/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
-Must not be called when recv_no_ibuf_operations==true.
-@param[in] page_id page id
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in] x_latch FALSE if relaxed check (avoid latching the
-bitmap page)
-@param[in,out] mtr mtr which will contain an x-latch to the
-bitmap page if the page is not one of the fixed address ibuf pages, or NULL,
-in which case a new transaction is created.
-@return true if level 2 or level 3 page */
-bool
-ibuf_page_low(
- const page_id_t page_id,
- ulint zip_size,
-#ifdef UNIV_DEBUG
- bool x_latch,
-#endif /* UNIV_DEBUG */
- mtr_t* mtr)
- MY_ATTRIBUTE((warn_unused_result));
-
-#ifdef UNIV_DEBUG
-/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
-Must not be called when recv_no_ibuf_operations==true.
-@param[in] page_id tablespace/page identifier
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in,out] mtr mini-transaction or NULL
-@return TRUE if level 2 or level 3 page */
-# define ibuf_page(page_id, zip_size, mtr) \
- ibuf_page_low(page_id, zip_size, true, mtr)
-
-#else /* UNIV_DEBUG */
-
-/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
-Must not be called when recv_no_ibuf_operations==true.
-@param[in] page_id tablespace/page identifier
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in,out] mtr mini-transaction or NULL
-@return TRUE if level 2 or level 3 page */
-# define ibuf_page(page_id, zip_size, mtr) \
- ibuf_page_low(page_id, zip_size, mtr)
-
-#endif /* UNIV_DEBUG */
-/***********************************************************************//**
-Frees excess pages from the ibuf free list. This function is called when an OS
-thread calls fsp services to allocate a new file segment, or a new page to a
-file segment, and the thread did not own the fsp latch before this call. */
-void
-ibuf_free_excess_pages(void);
-/*========================*/
-
-/** Buffer an operation in the change buffer, instead of applying it
-directly to the file page, if this is possible. Does not do it if the index
-is clustered or unique.
-@param[in] op operation type
-@param[in] entry index entry to insert
-@param[in,out] index index where to insert
-@param[in] page_id page id where to insert
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@param[in,out] thr query thread
-@return true if success */
-bool
-ibuf_insert(
- ibuf_op_t op,
- const dtuple_t* entry,
- dict_index_t* index,
- const page_id_t page_id,
- ulint zip_size,
- que_thr_t* thr);
-
-/** Check whether buffered changes exist for a page.
-@param[in] id page identifier
-@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@return whether buffered changes exist */
-bool ibuf_page_exists(const page_id_t id, ulint zip_size);
-
-/** When an index page is read from a disk to the buffer pool, this function
-applies any buffered operations to the page and deletes the entries from the
-insert buffer. If the page is not read, but created in the buffer pool, this
-function deletes its buffered entries from the insert buffer; there can
-exist entries for such a page if the page belonged to an index which
-subsequently was dropped.
-@param block X-latched page to try to apply changes to, or NULL to discard
-@param page_id page identifier
-@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
-@return error code */
-dberr_t ibuf_merge_or_delete_for_page(buf_block_t *block,
- const page_id_t page_id,
- ulint zip_size);
-
-/** Delete all change buffer entries for a tablespace,
-in DISCARD TABLESPACE, IMPORT TABLESPACE, or read-ahead.
-@param[in] space missing or to-be-discarded tablespace */
-void ibuf_delete_for_discarded_space(uint32_t space);
-
-/** Contract the change buffer by reading pages to the buffer pool.
-@return a lower limit for the combined size in bytes of entries which
-will be merged from ibuf trees to the pages read
-@retval 0 if ibuf.empty */
-ulint ibuf_contract();
-
-/** Contracts insert buffer trees by reading pages referring to space_id
-to the buffer pool.
-@returns number of pages merged.*/
-ulint
-ibuf_merge_space(
-/*=============*/
- ulint space); /*!< in: space id */
-
-/******************************************************************//**
-Looks if the insert buffer is empty.
-@return true if empty */
-bool
-ibuf_is_empty(void);
-/*===============*/
-/******************************************************************//**
-Prints info of ibuf. */
-void
-ibuf_print(
-/*=======*/
- FILE* file); /*!< in: file where to print */
-/********************************************************************
-Read the first two bytes from a record's fourth field (counter field in new
-records; something else in older records).
-@return "counter" field, or ULINT_UNDEFINED if for some reason it can't be read */
-ulint
-ibuf_rec_get_counter(
-/*=================*/
- const rec_t* rec); /*!< in: ibuf record */
-/******************************************************************//**
-Closes insert buffer and frees the data structures. */
-void
-ibuf_close(void);
-/*============*/
-
-/** Check the insert buffer bitmaps on IMPORT TABLESPACE.
-@param[in] trx transaction
-@param[in,out] space tablespace being imported
-@return DB_SUCCESS or error code */
-dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
- MY_ATTRIBUTE((nonnull, warn_unused_result));
-
-/** Update free bits and buffered bits for bulk loaded page.
-@param block secondary index leaf page
-@param mtr mini-transaction
-@param reset whether the page is full */
-void ibuf_set_bitmap_for_bulk_load(buf_block_t *block, mtr_t *mtr, bool reset);
-
-#define IBUF_HEADER_PAGE_NO FSP_IBUF_HEADER_PAGE_NO
-#define IBUF_TREE_ROOT_PAGE_NO FSP_IBUF_TREE_ROOT_PAGE_NO
-
-/* The ibuf header page currently contains only the file segment header
-for the file segment from which the pages for the ibuf tree are allocated */
-#define IBUF_HEADER PAGE_DATA
-#define IBUF_TREE_SEG_HEADER 0 /* fseg header for ibuf tree */
-
-/* The insert buffer tree itself is always located in space 0. */
-#define IBUF_SPACE_ID static_cast<ulint>(0)
-
-#include "ibuf0ibuf.inl"
-
-#endif
+#include "db0err.h"
+
+/* The purpose of the change buffer was to reduce random disk access.
+When we wished to
+(1) insert a record into a non-unique secondary index,
+(2) delete-mark a secondary index record, or
+(3) delete a secondary index record as part of purge (but not ROLLBACK),
+and the B-tree leaf page to which the record belongs was not in the
+buffer pool, we inserted a record into the change buffer B-tree, indexed
+by the page identifier. When the page was eventually read into the
+buffer pool, we looked up the change buffer B-tree for any modifications
+to the page and applied them upon completion of the read operation. This
+was called the insert buffer merge.
+
+There was a hash index of the change buffer B-tree, implemented as the
+"change buffer bitmap". Bits in these bitmap pages indicated roughly how
+full the page was, and whether any records for the page identifier
+existed in the change buffer. The "free" bits had to be updated as part
+of operations that modified secondary index leaf pages.
+
+Because the change buffer has been removed, we will no longer update
+any change buffer bitmap pages. Instead, on database startup, we will
+check whether an upgrade needs to be performed and, if so, apply any
+buffered changes. Finally, the change buffer will be transformed to a
+format that earlier versions of MariaDB Server will not recognize, so
+that a downgrade (where the change buffer might be re-enabled) cannot
+cause corruption due to the bitmap pages that are no longer being
+updated. */
+
+/** Check if ibuf_upgrade() is needed as part of server startup.
+@return error code
+@retval DB_SUCCESS if no upgrade is needed
+@retval DB_FAIL if the change buffer is not empty (need ibuf_upgrade()) */
+dberr_t ibuf_upgrade_needed();
+
+/** Upgrade the change buffer after all redo log has been applied. */
+dberr_t ibuf_upgrade();
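A minimal sketch of how the two entry points declared above could be combined during startup; the wrapper name ibuf_upgrade_if_needed() is hypothetical and not part of this patch, and the real call site is not shown in this hunk:

/* Sketch only: combine the startup check with the upgrade.
   DB_FAIL from ibuf_upgrade_needed() means buffered changes remain
   and must be merged after all redo log has been applied. */
static dberr_t ibuf_upgrade_if_needed()
{
  dberr_t err = ibuf_upgrade_needed();
  if (err == DB_FAIL)
    /* Merge the buffered changes and convert the change buffer to
    the format that older servers will not recognize. */
    err = ibuf_upgrade();
  return err;
}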
diff --git a/storage/innobase/include/ibuf0ibuf.inl b/storage/innobase/include/ibuf0ibuf.inl
deleted file mode 100644
index 003bf22a047..00000000000
--- a/storage/innobase/include/ibuf0ibuf.inl
+++ /dev/null
@@ -1,282 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2021, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/**************************************************//**
-@file include/ibuf0ibuf.ic
-Insert buffer
-
-Created 7/19/1997 Heikki Tuuri
-*******************************************************/
-
-#include "page0page.h"
-#include "page0zip.h"
-#include "fsp0types.h"
-#include "buf0lru.h"
-
-/** An index page must contain at least srv_page_size /
-IBUF_PAGE_SIZE_PER_FREE_SPACE bytes of free space for ibuf to try to
-buffer inserts to this page. If there is this much of free space, the
-corresponding bits are set in the ibuf bitmap. */
-#define IBUF_PAGE_SIZE_PER_FREE_SPACE 32
-
-/***************************************************************//**
-Starts an insert buffer mini-transaction. */
-UNIV_INLINE
-void
-ibuf_mtr_start(
-/*===========*/
- mtr_t* mtr) /*!< out: mini-transaction */
-{
- mtr_start(mtr);
- mtr->enter_ibuf();
-
- if (high_level_read_only || srv_read_only_mode) {
- mtr_set_log_mode(mtr, MTR_LOG_NO_REDO);
- }
-
-}
-/***************************************************************//**
-Commits an insert buffer mini-transaction. */
-UNIV_INLINE
-void
-ibuf_mtr_commit(
-/*============*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
-{
- ut_ad(mtr->is_inside_ibuf());
- ut_d(mtr->exit_ibuf());
-
- mtr_commit(mtr);
-}
-
-/************************************************************************//**
-Sets the free bit of the page in the ibuf bitmap. This is done in a separate
-mini-transaction, hence this operation does not restrict further work to only
-ibuf bitmap operations, which would result if the latch to the bitmap page
-were kept. */
-void
-ibuf_set_free_bits_func(
-/*====================*/
- buf_block_t* block, /*!< in: index page of a non-clustered index;
- free bit is reset if page level is 0 */
-#ifdef UNIV_IBUF_DEBUG
- ulint max_val,/*!< in: ULINT_UNDEFINED or a maximum
- value which the bits must have before
- setting; this is for debugging */
-#endif /* UNIV_IBUF_DEBUG */
- ulint val); /*!< in: value to set: < 4 */
-#ifdef UNIV_IBUF_DEBUG
-# define ibuf_set_free_bits(b,v,max) ibuf_set_free_bits_func(b,max,v)
-#else /* UNIV_IBUF_DEBUG */
-# define ibuf_set_free_bits(b,v,max) ibuf_set_free_bits_func(b,v)
-#endif /* UNIV_IBUF_DEBUG */
-
-/**********************************************************************//**
-A basic partial test if an insert to the insert buffer could be possible and
-recommended. */
-UNIV_INLINE
-ibool
-ibuf_should_try(
-/*============*/
- dict_index_t* index, /*!< in: index where to insert */
- ulint ignore_sec_unique) /*!< in: if != 0, we should
- ignore UNIQUE constraint on
- a secondary index when we
- decide */
-{
- if (index->type & (DICT_CLUSTERED | DICT_IBUF | DICT_SPATIAL) ||
- !innodb_change_buffering || !ibuf.max_size)
- return false;
- if (!ignore_sec_unique && index->is_unique())
- return false;
- if (index->table->quiesce != QUIESCE_NONE)
- return false;
- for (unsigned i= 0; i < index->n_fields; i++)
- if (index->fields[i].descending)
- return false;
- return true;
-}
-
-/******************************************************************//**
-Returns TRUE if the current OS thread is performing an insert buffer
-routine.
-
-For instance, a read-ahead of non-ibuf pages is forbidden by threads
-that are executing an insert buffer routine.
-@return TRUE if inside an insert buffer routine */
-UNIV_INLINE
-ibool
-ibuf_inside(
-/*========*/
- const mtr_t* mtr) /*!< in: mini-transaction */
-{
- return(mtr->is_inside_ibuf());
-}
-
-/** Translates the free space on a page to a value in the ibuf bitmap.
-@param[in] page_size page size in bytes
-@param[in] max_ins_size maximum insert size after reorganize for
-the page
-@return value for ibuf bitmap bits */
-UNIV_INLINE
-ulint
-ibuf_index_page_calc_free_bits(
- ulint page_size,
- ulint max_ins_size)
-{
- ulint n;
- ut_ad(ut_is_2pow(page_size));
- ut_ad(page_size > IBUF_PAGE_SIZE_PER_FREE_SPACE);
-
- n = max_ins_size / (page_size / IBUF_PAGE_SIZE_PER_FREE_SPACE);
-
- if (n == 3) {
- n = 2;
- }
-
- if (n > 3) {
- n = 3;
- }
-
- return(n);
-}
-
-/*********************************************************************//**
-Translates the free space on a compressed page to a value in the ibuf bitmap.
-@return value for ibuf bitmap bits */
-UNIV_INLINE
-ulint
-ibuf_index_page_calc_free_zip(
-/*==========================*/
- const buf_block_t* block) /*!< in: buffer block */
-{
- ulint max_ins_size;
- const page_zip_des_t* page_zip;
- lint zip_max_ins;
-
- ut_ad(block->page.zip.data);
-
- /* Consider the maximum insert size on the uncompressed page
- without reorganizing the page. We must not assume anything
- about the compression ratio. If zip_max_ins > max_ins_size and
- there is 1/4 garbage on the page, recompression after the
- reorganize could fail, in theory. So, let us guarantee that
- merging a buffered insert to a compressed page will always
- succeed without reorganizing or recompressing the page, just
- by using the page modification log. */
- max_ins_size = page_get_max_insert_size(
- buf_block_get_frame(block), 1);
-
- page_zip = buf_block_get_page_zip(block);
- zip_max_ins = page_zip_max_ins_size(page_zip,
- FALSE/* not clustered */);
-
- if (zip_max_ins < 0) {
- return(0);
- } else if (max_ins_size > (ulint) zip_max_ins) {
- max_ins_size = (ulint) zip_max_ins;
- }
-
- return(ibuf_index_page_calc_free_bits(block->physical_size(),
- max_ins_size));
-}
-
-/*********************************************************************//**
-Translates the free space on a page to a value in the ibuf bitmap.
-@return value for ibuf bitmap bits */
-UNIV_INLINE
-ulint
-ibuf_index_page_calc_free(
-/*======================*/
- const buf_block_t* block) /*!< in: buffer block */
-{
- if (!block->page.zip.data) {
- ulint max_ins_size;
-
- max_ins_size = page_get_max_insert_size_after_reorganize(
- buf_block_get_frame(block), 1);
-
- return(ibuf_index_page_calc_free_bits(
- block->physical_size(), max_ins_size));
- } else {
- return(ibuf_index_page_calc_free_zip(block));
- }
-}
-
-/************************************************************************//**
-Updates the free bits of an uncompressed page in the ibuf bitmap if
-there is not enough free on the page any more. This is done in a
-separate mini-transaction, hence this operation does not restrict
-further work to only ibuf bitmap operations, which would result if the
-latch to the bitmap page were kept. NOTE: The free bits in the insert
-buffer bitmap must never exceed the free space on a page. It is
-unsafe to increment the bits in a separately committed
-mini-transaction, because in crash recovery, the free bits could
-momentarily be set too high. It is only safe to use this function for
-decrementing the free bits. Should more free space become available,
-we must not update the free bits here, because that would break crash
-recovery. */
-UNIV_INLINE
-void
-ibuf_update_free_bits_if_full(
-/*==========================*/
- buf_block_t* block, /*!< in: index page to which we have added new
- records; the free bits are updated if the
- index is non-clustered and non-unique and
- the page level is 0, and the page becomes
- fuller */
- ulint max_ins_size,/*!< in: value of maximum insert size with
- reorganize before the latest operation
- performed to the page */
- ulint increase)/*!< in: upper limit for the additional space
- used in the latest operation, if known, or
- ULINT_UNDEFINED */
-{
- ulint before;
- ulint after;
-
- ut_ad(buf_block_get_page_zip(block) == NULL);
-
- before = ibuf_index_page_calc_free_bits(
- srv_page_size, max_ins_size);
-
- if (max_ins_size >= increase) {
- compile_time_assert(ULINT32_UNDEFINED > UNIV_PAGE_SIZE_MAX);
- after = ibuf_index_page_calc_free_bits(
- srv_page_size, max_ins_size - increase);
-#ifdef UNIV_IBUF_DEBUG
- ut_a(after <= ibuf_index_page_calc_free(block));
-#endif
- } else {
- after = ibuf_index_page_calc_free(block);
- }
-
- if (after == 0) {
- /* We move the page to the front of the buffer pool LRU list:
- the purpose of this is to prevent those pages to which we
- cannot make inserts using the insert buffer from slipping
- out of the buffer pool */
-
- buf_page_make_young(&block->page);
- }
-
- if (before > after) {
- ibuf_set_free_bits(block, after, before);
- }
-}
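For reference, the 2-bit free-space encoding implemented by the deleted ibuf_index_page_calc_free_bits() can be illustrated for the default 16 KiB page size (a worked example only, restating the code removed above):

/* One bitmap step = 16384 / IBUF_PAGE_SIZE_PER_FREE_SPACE = 512 bytes.
   max_ins_size =  400 -> n = 0 (less than one step of free space)
   max_ins_size = 1200 -> n = 2 (1200 / 512 = 2)
   max_ins_size = 1600 -> n = 3, rounded down to 2
   max_ins_size = 2100 -> n = 4, capped to 3 (at least four steps free) */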
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index 09e4ece8894..8afa92abc93 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -275,6 +275,8 @@ public:
bool log_maybe_unbuffered;
# endif
#endif
+ /** whether each write to ib_logfile0 is durable (O_DSYNC) */
+ my_bool log_write_through;
/** Fields involved in checkpoints @{ */
lsn_t log_capacity; /*!< capacity of the log; if
@@ -362,6 +364,8 @@ public:
/** Try to enable or disable file system caching (update log_buffered) */
void set_buffered(bool buffered);
#endif
+ /** Try to enable or disable durable writes (update log_write_through) */
+ void set_write_through(bool write_through);
void attach(log_file_t file, os_offset_t size);
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index e787d81e8c2..c0b79f1a76d 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -417,16 +417,6 @@ public:
/** The recovery system */
extern recv_sys_t recv_sys;
-/** If the following is TRUE, the buffer pool file pages must be invalidated
-after recovery and no ibuf operations are allowed; this will be set if
-recv_sys.pages becomes too full, and log records must be merged
-to file pages already before the recovery is finished: in this case no
-ibuf operations are allowed, as they could modify the pages read in the
-buffer pool before the pages have been recovered to the up-to-date state.
-
-TRUE means that recovery is running and no operations on the log files
-are allowed yet: the variable name is misleading. */
-extern bool recv_no_ibuf_operations;
/** TRUE when recv_init_crash_recovery() has been called. */
extern bool recv_needed_recovery;
#ifdef UNIV_DEBUG
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index b8df6d9f63e..5576560dca8 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -308,15 +308,6 @@ public:
@retval 0 if the transaction only modified temporary tablespaces */
lsn_t commit_lsn() const { ut_ad(has_committed()); return m_commit_lsn; }
- /** Note that we are inside the change buffer code. */
- void enter_ibuf() { m_inside_ibuf= true; }
-
- /** Note that we have exited from the change buffer code. */
- void exit_ibuf() { m_inside_ibuf= false; }
-
- /** @return true if we are inside the change buffer code */
- bool is_inside_ibuf() const { return m_inside_ibuf; }
-
/** Note that some pages have been freed */
void set_trim_pages() { m_trim_pages= true; }
@@ -745,10 +736,6 @@ private:
/** whether log_sys.latch is locked exclusively */
uint16_t m_latch_ex:1;
- /** whether change buffer is latched; only needed in non-debug builds
- to suppress some read-ahead operations, @see ibuf_inside() */
- uint16_t m_inside_ibuf:1;
-
/** whether the pages has been trimmed */
uint16_t m_trim_pages:1;
diff --git a/storage/innobase/include/page0cur.h b/storage/innobase/include/page0cur.h
index 28aa30565e4..279138acd79 100644
--- a/storage/innobase/include/page0cur.h
+++ b/storage/innobase/include/page0cur.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2022, MariaDB Corporation.
+Copyright (c) 2018, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -117,11 +117,6 @@ succeed, i.e., enough space available, NULL otherwise. The cursor stays at
the same logical position, but the physical position may change if it is
pointing to a compressed page that was reorganized.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to record if succeed, NULL otherwise */
UNIV_INLINE
rec_t*
@@ -151,11 +146,6 @@ page_cur_insert_rec_low(
Inserts a record next to page cursor on a compressed and uncompressed
page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to inserted record
@return nullptr on failure */
rec_t*
diff --git a/storage/innobase/include/page0cur.inl b/storage/innobase/include/page0cur.inl
index 7c4eafa266a..a73c31a7bff 100644
--- a/storage/innobase/include/page0cur.inl
+++ b/storage/innobase/include/page0cur.inl
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2022, MariaDB Corporation.
+Copyright (c) 2015, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -155,11 +155,6 @@ succeed, i.e., enough space available, NULL otherwise. The cursor stays at
the same logical position, but the physical position may change if it is
pointing to a compressed page that was reorganized.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to record if succeed, NULL otherwise */
UNIV_INLINE
rec_t*
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index 2978656b508..38373f6bb19 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -95,7 +95,7 @@ this byte can be garbage. */
direction */
#define PAGE_N_RECS 16 /* number of user records on the page */
/** The largest DB_TRX_ID that may have modified a record on the page;
-Defined only in secondary index leaf pages and in change buffer leaf pages.
+Defined only in secondary index leaf pages.
Otherwise written as 0. @see PAGE_ROOT_AUTO_INC */
#define PAGE_MAX_TRX_ID 18
/** The AUTO_INCREMENT value (on persistent clustered index root pages). */
@@ -901,11 +901,6 @@ MY_ATTRIBUTE((nonnull, warn_unused_result))
Differs from page_copy_rec_list_end, because this function does not
touch the lock table and max trx id on page or compress the page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_t::commit().
-
@return error code */
dberr_t
page_copy_rec_list_end_no_locks(
@@ -920,11 +915,6 @@ Copies records from page to new_page, from the given record onward,
including that record. Infimum and supremum records are not copied.
The records are copied to the start of the record list on new_page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_t::commit().
-
@return pointer to the original successor of the infimum record on new_block
@retval nullptr on ROW_FORMAT=COMPRESSED page overflow */
rec_t*
@@ -942,11 +932,6 @@ Copies records from page to new_page, up to the given record, NOT
including that record. Infimum and supremum records are not copied.
The records are copied to the end of the record list on new_page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to the original predecessor of the supremum record on new_block
@retval nullptr on ROW_FORMAT=COMPRESSED page overflow */
rec_t*
diff --git a/storage/innobase/include/page0zip.h b/storage/innobase/include/page0zip.h
index 4332990619e..501ef31a8f9 100644
--- a/storage/innobase/include/page0zip.h
+++ b/storage/innobase/include/page0zip.h
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -213,9 +213,9 @@ page_zip_max_ins_size(
/**********************************************************************//**
Determine if enough space is available in the modification log.
-@return TRUE if page_zip_write_rec() will succeed */
+@return true if page_zip_write_rec() will succeed */
UNIV_INLINE
-ibool
+bool
page_zip_available(
/*===============*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
@@ -323,10 +323,6 @@ Reorganize and compress a page. This is a low-level operation for
compressed pages, to be used when page_zip_compress() fails.
On success, redo log will be written.
The function btr_page_reorganize() should be preferred whenever possible.
-IMPORTANT: if page_zip_reorganize() is invoked on a leaf page of a
-non-clustered index, the caller must update the insert buffer free
-bits in the same mini-transaction in such a way that the modification
-will be redo-logged.
@return error code
@retval DB_FAIL on overflow; the block_zip will be left intact */
dberr_t
diff --git a/storage/innobase/include/page0zip.inl b/storage/innobase/include/page0zip.inl
index afc877c3720..edcd4ab48fa 100644
--- a/storage/innobase/include/page0zip.inl
+++ b/storage/innobase/include/page0zip.inl
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -269,7 +269,7 @@ page_zip_max_ins_size(
Determine if enough space is available in the modification log.
@return TRUE if enough space is available */
UNIV_INLINE
-ibool
+bool
page_zip_available(
/*===============*/
const page_zip_des_t* page_zip,/*!< in: compressed page */
diff --git a/storage/innobase/include/rem0rec.inl b/storage/innobase/include/rem0rec.inl
index 46c209cbdec..da7337a3b82 100644
--- a/storage/innobase/include/rem0rec.inl
+++ b/storage/innobase/include/rem0rec.inl
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1095,9 +1095,7 @@ rec_get_converted_size(
ut_ad(dtuple_check_typed(dtuple));
#ifdef UNIV_DEBUG
- if (dict_index_is_ibuf(index)) {
- ut_ad(dtuple->n_fields > 1);
- } else if ((dtuple_get_info_bits(dtuple) & REC_NEW_STATUS_MASK)
+ if ((dtuple_get_info_bits(dtuple) & REC_NEW_STATUS_MASK)
== REC_STATUS_NODE_PTR) {
ut_ad(dtuple->n_fields - 1
== dict_index_get_n_unique_in_tree_nonleaf(index));
diff --git a/storage/innobase/include/row0purge.h b/storage/innobase/include/row0purge.h
index b1390fd1ef1..686bbaa7384 100644
--- a/storage/innobase/include/row0purge.h
+++ b/storage/innobase/include/row0purge.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -37,39 +37,6 @@ Created 3/14/1997 Heikki Tuuri
#include <queue>
class MDL_ticket;
-/** Determines if it is possible to remove a secondary index entry.
-Removal is possible if the secondary index entry does not refer to any
-not delete marked version of a clustered index record where DB_TRX_ID
-is newer than the purge view.
-
-NOTE: This function should only be called by the purge thread, only
-while holding a latch on the leaf page of the secondary index entry
-(or keeping the buffer pool watch on the page). It is possible that
-this function first returns true and then false, if a user transaction
-inserts a record that the secondary index entry would refer to.
-However, in that case, the user transaction would also re-insert the
-secondary index entry after purge has removed it and released the leaf
-page latch.
-@param[in,out] node row purge node
-@param[in] index secondary index
-@param[in] entry secondary index entry
-@param[in,out] sec_pcur secondary index cursor or NULL
- if it is called for purge buffering
- operation.
-@param[in,out] sec_mtr mini-transaction which holds
- secondary index entry or NULL if it is
- called for purge buffering operation.
-@param[in] is_tree true=pessimistic purge,
- false=optimistic (leaf-page only)
-@return true if the secondary index record can be purged */
-bool
-row_purge_poss_sec(
- purge_node_t* node,
- dict_index_t* index,
- const dtuple_t* entry,
- btr_pcur_t* sec_pcur=NULL,
- mtr_t* sec_mtr=NULL,
- bool is_tree=false);
/***************************************************************
Does the purge operation.
diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index a1350740e2a..a26924d08a0 100644
--- a/storage/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2022, MariaDB Corporation.
+Copyright (c) 2016, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -28,7 +28,6 @@ Created 4/20/1996 Heikki Tuuri
#define row0row_h
#include "que0types.h"
-#include "ibuf0ibuf.h"
#include "trx0types.h"
#include "mtr0mtr.h"
#include "rem0types.h"
@@ -344,23 +343,10 @@ row_parse_int(
ulint mtype,
bool unsigned_type);
-/** Result of row_search_index_entry */
-enum row_search_result {
- ROW_FOUND = 0, /*!< the record was found */
- ROW_NOT_FOUND, /*!< record not found */
- ROW_BUFFERED, /*!< one of BTR_INSERT, BTR_DELETE, or
- BTR_DELETE_MARK was specified, the
- secondary index leaf page was not in
- the buffer pool, and the operation was
- enqueued in the insert/delete buffer */
- ROW_NOT_DELETED_REF /*!< BTR_DELETE was specified, and
- row_purge_poss_sec() failed */
-};
-
/***************************************************************//**
Searches an index record.
-@return whether the record was found or buffered */
-enum row_search_result
+@return whether the record was found */
+bool
row_search_index_entry(
/*===================*/
const dtuple_t* entry, /*!< in: index entry */
@@ -398,22 +384,17 @@ row_raw_format(
in bytes */
MY_ATTRIBUTE((nonnull, warn_unused_result));
+#include "dict0mem.h"
+
/** Prepare to start a mini-transaction to modify an index.
@param[in,out] mtr mini-transaction
-@param[in,out] index possibly secondary index
-@param[in] pessimistic whether this is a pessimistic operation */
-inline
-void
-row_mtr_start(mtr_t* mtr, dict_index_t* index, bool pessimistic)
+@param[in,out] index possibly secondary index */
+inline void row_mtr_start(mtr_t* mtr, dict_index_t* index)
{
mtr->start();
switch (index->table->space_id) {
- case IBUF_SPACE_ID:
- if (pessimistic
- && !(index->type & (DICT_UNIQUE | DICT_SPATIAL))) {
- ibuf_free_excess_pages();
- }
+ case 0:
break;
case SRV_TMP_SPACE_ID:
mtr->set_log_mode(MTR_LOG_NO_REDO);
diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h
index 51f3049b81a..4672ce00a36 100644
--- a/storage/innobase/include/srv0mon.h
+++ b/storage/innobase/include/srv0mon.h
@@ -2,7 +2,7 @@
Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -228,12 +228,8 @@ enum monitor_id_t {
MONITOR_MODULE_BUF_PAGE,
MONITOR_INDEX_LEAF_PAGE_READ,
MONITOR_INDEX_NON_LEAF_PAGE_READ,
- MONITOR_INDEX_IBUF_LEAF_PAGE_READ,
- MONITOR_INDEX_IBUF_NON_LEAF_PAGE_READ,
MONITOR_UNDO_LOG_PAGE_READ,
MONITOR_INODE_PAGE_READ,
- MONITOR_IBUF_FREELIST_PAGE_READ,
- MONITOR_IBUF_BITMAP_PAGE_READ,
MONITOR_SYSTEM_PAGE_READ,
MONITOR_TRX_SYSTEM_PAGE_READ,
MONITOR_FSP_HDR_PAGE_READ,
@@ -244,12 +240,8 @@ enum monitor_id_t {
MONITOR_OTHER_PAGE_READ,
MONITOR_INDEX_LEAF_PAGE_WRITTEN,
MONITOR_INDEX_NON_LEAF_PAGE_WRITTEN,
- MONITOR_INDEX_IBUF_LEAF_PAGE_WRITTEN,
- MONITOR_INDEX_IBUF_NON_LEAF_PAGE_WRITTEN,
MONITOR_UNDO_LOG_PAGE_WRITTEN,
MONITOR_INODE_PAGE_WRITTEN,
- MONITOR_IBUF_FREELIST_PAGE_WRITTEN,
- MONITOR_IBUF_BITMAP_PAGE_WRITTEN,
MONITOR_SYSTEM_PAGE_WRITTEN,
MONITOR_TRX_SYSTEM_PAGE_WRITTEN,
MONITOR_FSP_HDR_PAGE_WRITTEN,
@@ -347,17 +339,6 @@ enum monitor_id_t {
MONITOR_MODULE_FIL_SYSTEM,
MONITOR_OVLD_N_FILE_OPENED,
- /* InnoDB Change Buffer related counters */
- MONITOR_MODULE_IBUF_SYSTEM,
- MONITOR_OVLD_IBUF_MERGE_INSERT,
- MONITOR_OVLD_IBUF_MERGE_DELETE,
- MONITOR_OVLD_IBUF_MERGE_PURGE,
- MONITOR_OVLD_IBUF_MERGE_DISCARD_INSERT,
- MONITOR_OVLD_IBUF_MERGE_DISCARD_DELETE,
- MONITOR_OVLD_IBUF_MERGE_DISCARD_PURGE,
- MONITOR_OVLD_IBUF_MERGES,
- MONITOR_OVLD_IBUF_SIZE,
-
/* Counters for server operations */
MONITOR_MODULE_SERVER,
MONITOR_MASTER_THREAD_SLEEP,
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index b85fa518384..52e5a724efd 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -165,9 +165,9 @@ extern char* srv_data_home;
recovery and open all tables in RO mode instead of RW mode. We don't
sync the max trx id to disk either. */
extern my_bool srv_read_only_mode;
-/** Set if InnoDB operates in read-only mode or innodb-force-recovery
-is greater than SRV_FORCE_NO_IBUF_MERGE. */
-extern my_bool high_level_read_only;
+/** Set if innodb_read_only is set or innodb_force_recovery
+is SRV_FORCE_NO_UNDO_LOG_SCAN or greater. */
+extern bool high_level_read_only;
/** store to its own file each table created by an user; data
dictionary tables are in the system tablespace 0 */
extern my_bool srv_file_per_table;
@@ -270,8 +270,6 @@ extern double srv_defragment_fill_factor;
extern uint srv_defragment_frequency;
extern ulonglong srv_defragment_interval;
-extern uint srv_change_buffer_max_size;
-
/* Number of IO operations per second the server can do */
extern ulong srv_io_capacity;
@@ -296,7 +294,7 @@ extern ulong srv_flushing_avg_loops;
extern ulong srv_force_recovery;
-/** innodb_fast_shutdown=1 skips purge and change buffer merge.
+/** innodb_fast_shutdown=1 skips purge.
innodb_fast_shutdown=2 effectively crashes the server (no log checkpoint).
innodb_fast_shutdown=3 is a clean shutdown that skips the rollback
of active transaction (to be done on restart). */
@@ -580,11 +578,6 @@ void srv_monitor_task(void*);
void srv_master_callback(void*);
-/**
-Complete the shutdown tasks such as background DROP TABLE,
-and optionally change buffer merge (on innodb_fast_shutdown=0). */
-void srv_shutdown(bool ibuf_merge);
-
} /* extern "C" */
#ifdef UNIV_DEBUG
diff --git a/storage/innobase/include/sux_lock.h b/storage/innobase/include/sux_lock.h
index 2c0167ac651..7a7f93b6787 100644
--- a/storage/innobase/include/sux_lock.h
+++ b/storage/innobase/include/sux_lock.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2020, 2022, MariaDB Corporation.
+Copyright (c) 2020, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -149,7 +149,7 @@ private:
#endif
public:
- /** In crash recovery or the change buffer, claim the ownership
+ /** In crash recovery, claim the ownership
of the exclusive block lock to the current thread */
void claim_ownership() { set_new_owner(pthread_self()); }
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index 152e794ac6a..81eb5471a7b 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2022, MariaDB Corporation.
+Copyright (c) 2015, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -778,13 +778,17 @@ public:
const char* op_info; /*!< English text describing the
current operation, or an empty
string */
- uint isolation_level;/*!< TRX_ISO_REPEATABLE_READ, ... */
- bool check_foreigns; /*!< normally TRUE, but if the user
- wants to suppress foreign key checks,
- (in table imports, for example) we
- set this FALSE */
+ /** TRX_ISO_REPEATABLE_READ, ... */
+ unsigned isolation_level:2;
+ /** normally set; "SET foreign_key_checks=0" can be issued to suppress
+ foreign key checks, in table imports, for example */
+ unsigned check_foreigns:1;
+ /** normally set; "SET unique_checks=0, foreign_key_checks=0"
+ enables bulk insert into an empty table */
+ unsigned check_unique_secondary:1;
+
/** whether an insert into an empty table is active */
- bool bulk_insert;
+ unsigned bulk_insert:1;
/*------------------------------*/
/* MySQL has a transaction coordinator to coordinate two phase
commit between multiple storage engines and the binary log. When
@@ -798,13 +802,6 @@ public:
/** whether this is holding the prepare mutex */
bool active_commit_ordered;
/*------------------------------*/
- bool check_unique_secondary;
- /*!< normally TRUE, but if the user
- wants to speed up inserts by
- suppressing unique key checks
- for secondary indexes when we decide
- if we can use the insert buffer for
- them, we set this FALSE */
bool flush_log_later;/* In 2PC, we hold the
prepare_commit mutex across
both phases. In that case, we
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index 3474a903f6c..4728e7ef2bf 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -488,10 +488,10 @@ completely purged and trx_purge_free_segment() has started freeing it */
/** Transaction end identifier (if the log is in a history list),
or 0 if the transaction has not been committed */
#define TRX_UNDO_TRX_NO 8
-/** Before MariaDB 10.3.1, when purge did not reset DB_TRX_ID of
+/* Before MariaDB 10.3.1, when purge did not reset DB_TRX_ID of
surviving user records, this used to be called TRX_UNDO_DEL_MARKS.
-This field is redundant; it is only being read by some debug assertions.
+This field was removed in MariaDB 11.0.
The value 1 indicates that purge needs to process the undo log segment.
The value 0 indicates that all of it has been processed, and
@@ -500,7 +500,7 @@ trx_purge_free_segment() has been invoked, so the log is not safe to access.
Before MariaDB 10.3.1, a log segment may carry the value 0 even before
trx_purge_free_segment() was called, for those undo log records for
which purge would not result in removing delete-marked records. */
-#define TRX_UNDO_NEEDS_PURGE 16
+/*#define TRX_UNDO_NEEDS_PURGE 16*/
#define TRX_UNDO_LOG_START 18 /*!< Offset of the first undo log record
of this log on the header page; purge
may remove undo log record from the
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index d06343fcabe..93352b279b1 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -142,7 +142,6 @@ using the call command. */
assertions. */
#define UNIV_LRU_DEBUG /* debug the buffer pool LRU */
#define UNIV_HASH_DEBUG /* debug HASH_ macros */
-#define UNIV_IBUF_DEBUG /* debug the insert buffer */
#define UNIV_PERF_DEBUG /* debug flag that enables
light weight performance
related stuff. */
@@ -475,9 +474,6 @@ extern mysql_pfs_key_t fts_cache_mutex_key;
extern mysql_pfs_key_t fts_cache_init_mutex_key;
extern mysql_pfs_key_t fts_delete_mutex_key;
extern mysql_pfs_key_t fts_doc_id_mutex_key;
-extern mysql_pfs_key_t ibuf_bitmap_mutex_key;
-extern mysql_pfs_key_t ibuf_mutex_key;
-extern mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key;
extern mysql_pfs_key_t recalc_pool_mutex_key;
extern mysql_pfs_key_t purge_sys_pq_mutex_key;
extern mysql_pfs_key_t recv_sys_mutex_key;
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index ff5508d489d..02c6649bc33 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
-Copyright (c) 2014, 2022, MariaDB Corporation.
+Copyright (c) 2014, 2023, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -401,6 +401,31 @@ void log_t::set_buffered(bool buffered)
}
#endif
+/** Try to enable or disable durable writes (update log_write_through) */
+void log_t::set_write_through(bool write_through)
+{
+ if (is_pmem() || high_level_read_only)
+ return;
+ log_resize_acquire();
+ if (!resize_in_progress() && is_opened() &&
+ bool(log_write_through) != write_through)
+ {
+ os_file_close_func(log.m_file);
+ log.m_file= OS_FILE_CLOSED;
+ std::string path{get_log_file_path()};
+ log_write_through= write_through;
+ bool success;
+ log.m_file= os_file_create_func(path.c_str(),
+ OS_FILE_OPEN, OS_FILE_NORMAL, OS_LOG_FILE,
+ false, &success);
+ ut_a(log.m_file != OS_FILE_CLOSED);
+ sql_print_information(log_write_through
+ ? "InnoDB: Log writes write through"
+ : "InnoDB: Log writes may be cached");
+ }
+ log_resize_release();
+}
+
/** Start resizing the log and release the exclusive latch.
@param size requested new file_size
@return whether the resizing was started successfully */
@@ -852,7 +877,7 @@ bool log_t::flush(lsn_t lsn) noexcept
{
ut_ad(lsn >= get_flushed_lsn());
flush_lock.set_pending(lsn);
- const bool success{srv_file_flush_method == SRV_O_DSYNC || log.flush()};
+ const bool success{log_write_through || log.flush()};
if (UNIV_LIKELY(success))
{
flushed_to_disk_lsn.store(lsn, std::memory_order_release);
@@ -888,15 +913,6 @@ void log_write_up_to(lsn_t lsn, bool durable,
{
ut_ad(!srv_read_only_mode);
ut_ad(lsn != LSN_MAX);
-
- if (UNIV_UNLIKELY(recv_no_ibuf_operations))
- {
- /* A non-final batch of recovery is active no writes to the log
- are allowed yet. */
- ut_a(!callback);
- return;
- }
-
ut_ad(lsn <= log_sys.get_lsn());
#ifdef HAVE_PMEM
@@ -922,6 +938,7 @@ repeat:
if (write_lock.acquire(lsn, durable ? nullptr : callback) ==
group_commit_lock::ACQUIRED)
{
+ ut_ad(!recv_no_log_write || srv_operation != SRV_OPERATION_NORMAL);
log_sys.latch.wr_lock(SRW_LOCK_CALL);
pending_write_lsn= write_lock.release(log_sys.write_buf<true>());
}
@@ -1054,11 +1071,9 @@ ATTRIBUTE_COLD void logs_empty_and_mark_files_at_shutdown()
ib::info() << "Starting shutdown...";
- /* Wait until the master thread and all other operations are idle: our
+ /* Wait until the master task and all other operations are idle: our
algorithm only works if the server is idle at shutdown */
- bool do_srv_shutdown = false;
if (srv_master_timer) {
- do_srv_shutdown = srv_fast_shutdown < 2;
srv_master_timer.reset();
}
@@ -1075,11 +1090,6 @@ ATTRIBUTE_COLD void logs_empty_and_mark_files_at_shutdown()
}
srv_monitor_timer.reset();
- if (do_srv_shutdown) {
- srv_shutdown(srv_fast_shutdown == 0);
- }
-
-
loop:
ut_ad(lock_sys.is_initialised() || !srv_was_started);
ut_ad(log_sys.is_initialised() || !srv_was_started);
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index 793f7b327c8..3443369af6c 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -46,7 +46,6 @@ Created 9/20/1997 Heikki Tuuri
#include "page0page.h"
#include "page0cur.h"
#include "trx0undo.h"
-#include "ibuf0ibuf.h"
#include "trx0undo.h"
#include "trx0rec.h"
#include "fil0fil.h"
@@ -71,17 +70,6 @@ number (FIL_PAGE_LSN) is in the future. Initially FALSE, and set by
recv_recovery_from_checkpoint_start(). */
bool recv_lsn_checks_on;
-/** If the following is TRUE, the buffer pool file pages must be invalidated
-after recovery and no ibuf operations are allowed; this becomes TRUE if
-the log record hash table becomes too full, and log records must be merged
-to file pages already before the recovery is finished: in this case no
-ibuf operations are allowed, as they could modify the pages read in the
-buffer pool before the pages have been recovered to the up-to-date state.
-
-true means that recovery is running and no operations on the log file
-are allowed yet: the variable name is misleading. */
-bool recv_no_ibuf_operations;
-
/** The maximum lsn we see for a page during the recovery process. If this
is bigger than the lsn we are able to scan up to, that is an indication that
the recovery failed and the database may be corrupt. */
@@ -739,7 +727,7 @@ static struct
retry:
log_sys.latch.wr_unlock();
bool fail= false;
- buf_block_t *free_block= buf_LRU_get_free_block(false);
+ buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex);
log_sys.latch.wr_lock(SRW_LOCK_CALL);
mysql_mutex_lock(&recv_sys.mutex);
@@ -1026,22 +1014,13 @@ FIXME: Rely on recv_sys.pages! */
class mlog_init_t
{
public:
- /** A page initialization operation that was parsed from
- the redo log */
- struct init {
- /** log sequence number of the page initialization */
- lsn_t lsn;
- /** Whether btr_page_create() avoided a read of the page.
-
- At the end of the last recovery batch, mark_ibuf_exist()
- will mark pages for which this flag is set. */
- bool created;
- };
+ /** log sequence number of the page initialization */
+ lsn_t lsn;
private:
- typedef std::map<const page_id_t, init,
+ typedef std::map<const page_id_t, lsn_t,
std::less<const page_id_t>,
- ut_allocator<std::pair<const page_id_t, init> > >
+ ut_allocator<std::pair<const page_id_t, lsn_t> > >
map;
/** Map of page initialization operations.
FIXME: Merge this to recv_sys.pages! */
@@ -1054,13 +1033,11 @@ public:
bool add(const page_id_t page_id, lsn_t lsn)
{
mysql_mutex_assert_owner(&recv_sys.mutex);
- const init init = { lsn, false };
- std::pair<map::iterator, bool> p = inits.insert(
- map::value_type(page_id, init));
- ut_ad(!p.first->second.created);
+ std::pair<map::iterator, bool> p = inits.emplace(
+ map::value_type(page_id, lsn));
if (p.second) return true;
- if (p.first->second.lsn >= init.lsn) return false;
- p.first->second = init;
+ if (p.first->second >= lsn) return false;
+ p.first->second = lsn;
return true;
}
@@ -1070,7 +1047,7 @@ public:
@param[in,out] init initialize log or load log
@return the latest page initialization;
not valid after releasing recv_sys.mutex. */
- init& last(page_id_t page_id)
+ lsn_t last(page_id_t page_id)
{
mysql_mutex_assert_owner(&recv_sys.mutex);
return inits.find(page_id)->second;
@@ -1084,69 +1061,7 @@ public:
{
mysql_mutex_assert_owner(&recv_sys.mutex);
auto i= inits.find(page_id);
- return i != inits.end() && i->second.lsn > lsn;
- }
-
- /** At the end of each recovery batch, reset the 'created' flags. */
- void reset()
- {
- mysql_mutex_assert_owner(&recv_sys.mutex);
- ut_ad(recv_no_ibuf_operations);
- for (map::value_type& i : inits) {
- i.second.created = false;
- }
- }
-
- /** On the last recovery batch, mark whether there exist
- buffered changes for the pages that were initialized
- by buf_page_create() and still reside in the buffer pool.
- @param[in,out] mtr dummy mini-transaction */
- void mark_ibuf_exist(mtr_t& mtr)
- {
- mysql_mutex_assert_owner(&recv_sys.mutex);
- mtr.start();
-
- for (const map::value_type& i : inits) {
- if (!i.second.created) {
- continue;
- }
- if (buf_block_t* block = buf_page_get_low(
- i.first, 0, RW_X_LATCH, nullptr,
- BUF_GET_IF_IN_POOL,
- &mtr, nullptr, false)) {
- if (UNIV_LIKELY_NULL(block->page.zip.data)) {
- switch (fil_page_get_type(
- block->page.zip.data)) {
- case FIL_PAGE_INDEX:
- case FIL_PAGE_RTREE:
- if (page_zip_decompress(
- &block->page.zip,
- block->page.frame,
- true)) {
- break;
- }
- ib::error() << "corrupted "
- << block->page.id();
- }
- }
- if (recv_no_ibuf_operations) {
- mtr.commit();
- mtr.start();
- continue;
- }
- mysql_mutex_unlock(&recv_sys.mutex);
- if (ibuf_page_exists(block->page.id(),
- block->zip_size())) {
- block->page.set_ibuf_exist();
- }
- mtr.commit();
- mtr.start();
- mysql_mutex_lock(&recv_sys.mutex);
- }
- }
-
- mtr.commit();
- clear();
+ return i != inits.end() && i->second > lsn;
}
/** Clear the data structure */
@@ -2890,19 +2805,17 @@ lsn of a log record.
@param[in,out] mtr mini-transaction
@param[in,out] p recovery address
@param[in,out] space tablespace, or NULL if not looked up yet
-@param[in,out] init page initialization operation, or NULL
+@param[in,out] init_lsn page initialization LSN, or 0
@return the recovered page
@retval nullptr on failure */
static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr,
const recv_sys_t::map::iterator &p,
fil_space_t *space= nullptr,
- mlog_init_t::init *init= nullptr)
+ lsn_t init_lsn= 0)
{
mysql_mutex_assert_owner(&recv_sys.mutex);
ut_ad(recv_sys.apply_log_recs);
ut_ad(recv_needed_recovery);
- ut_ad(!init || init->created);
- ut_ad(!init || init->lsn);
ut_ad(block->page.id() == p->first);
ut_ad(!p->second.is_being_processed());
ut_ad(!space || space->id == block->page.id().space());
@@ -2923,13 +2836,12 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr,
byte *frame = UNIV_LIKELY_NULL(block->page.zip.data)
? block->page.zip.data
: block->page.frame;
- const lsn_t page_lsn = init
+ const lsn_t page_lsn = init_lsn
? 0
: mach_read_from_8(frame + FIL_PAGE_LSN);
bool free_page = false;
lsn_t start_lsn = 0, end_lsn = 0;
ut_d(lsn_t recv_start_lsn = 0);
- const lsn_t init_lsn = init ? init->lsn : 0;
bool skipped_after_init = false;
@@ -3057,8 +2969,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr,
set_start_lsn:
if ((a == log_phys_t::APPLIED_CORRUPTED
|| recv_sys.is_corrupt_log()) && !srv_force_recovery) {
- if (init) {
- init->created = false;
+ if (init_lsn) {
if (space || block->page.id().page_no()) {
block->page.lock.x_lock_recursive();
}
@@ -3098,12 +3009,9 @@ set_start_lsn:
UT_LIST_ADD_FIRST(buf_pool.flush_list, &block->page);
buf_pool.page_cleaner_wakeup();
mysql_mutex_unlock(&buf_pool.flush_list_mutex);
- } else if (free_page && init) {
+ } else if (free_page && init_lsn) {
/* There have been no operations that modify the page.
- Any buffered changes must not be merged. A subsequent
- buf_page_create() from a user thread should discard
- any buffered changes. */
- init->created = false;
+ Any buffered changes will be merged in ibuf_upgrade(). */
ut_ad(!mtr.has_modifications());
block->page.set_freed(block->page.state());
}
@@ -3237,10 +3145,12 @@ func_exit:
}
/** Read pages for which log needs to be applied.
-@param page_id first page identifier to read
-@param i iterator to recv_sys.pages */
+@param page_id first page identifier to read
+@param i iterator to recv_sys.pages
+@param last_batch whether it is possible to write more redo log */
TRANSACTIONAL_TARGET
-static void recv_read_in_area(page_id_t page_id, recv_sys_t::map::iterator i)
+static void recv_read_in_area(page_id_t page_id, recv_sys_t::map::iterator i,
+ bool last_batch)
{
uint32_t page_nos[32];
ut_ad(page_id == i->first);
@@ -3260,7 +3170,9 @@ static void recv_read_in_area(page_id_t page_id, recv_sys_t::map::iterator i)
if (p != page_nos)
{
mysql_mutex_unlock(&recv_sys.mutex);
+ if (!last_batch) log_sys.latch.wr_unlock();
buf_read_recv_pages(page_id.space(), {page_nos, p});
+ if (!last_batch) log_sys.latch.wr_lock(SRW_LOCK_CALL);
mysql_mutex_lock(&recv_sys.mutex);
}
}
@@ -3282,11 +3194,11 @@ inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id,
page_recv_t &recs= p->second;
ut_ad(recs.state == page_recv_t::RECV_WILL_NOT_READ);
buf_block_t* block= nullptr;
- mlog_init_t::init &i= mlog_init.last(page_id);
+ const lsn_t init_lsn= mlog_init.last(page_id);
const lsn_t end_lsn= recs.log.last()->lsn;
- if (end_lsn < i.lsn)
+ if (end_lsn < init_lsn)
DBUG_LOG("ib_log", "skip log for page " << page_id
- << " LSN " << end_lsn << " < " << i.lsn);
+ << " LSN " << end_lsn << " < " << init_lsn);
fil_space_t *space= fil_space_t::get(page_id.space());
mtr.start();
@@ -3326,9 +3238,8 @@ inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id,
}
ut_ad(&recs == &pages.find(page_id)->second);
- i.created= true;
map::iterator r= p++;
- block= recv_recover_page(block, mtr, r, space, &i);
+ block= recv_recover_page(block, mtr, r, space, init_lsn);
ut_ad(mtr.has_committed());
if (block)
@@ -3354,7 +3265,7 @@ inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id,
@retval nullptr if the page cannot be initialized based on log records */
buf_block_t *recv_sys_t::recover_low(const page_id_t page_id)
{
- buf_block_t *free_block= buf_LRU_get_free_block(false);
+ buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex);
buf_block_t *block= nullptr;
mysql_mutex_lock(&mutex);
@@ -3441,10 +3352,6 @@ void recv_sys_t::apply(bool last_batch)
}
}
- recv_no_ibuf_operations = !last_batch ||
- srv_operation == SRV_OPERATION_RESTORE ||
- srv_operation == SRV_OPERATION_RESTORE_EXPORT;
-
mtr_t mtr;
if (!pages.empty())
@@ -3492,7 +3399,7 @@ void recv_sys_t::apply(bool last_batch)
if (!last_batch)
log_sys.latch.wr_unlock();
- buf_block_t *free_block= buf_LRU_get_free_block(false);
+ buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex);
if (!last_batch)
log_sys.latch.wr_lock(SRW_LOCK_CALL);
@@ -3545,7 +3452,7 @@ next_free_block:
mysql_mutex_unlock(&mutex);
if (!last_batch)
log_sys.latch.wr_unlock();
- free_block= buf_LRU_get_free_block(false);
+ free_block= buf_LRU_get_free_block(have_no_mutex);
if (!last_batch)
log_sys.latch.wr_lock(SRW_LOCK_CALL);
mysql_mutex_lock(&mutex);
@@ -3554,7 +3461,7 @@ next_free_block:
ut_ad(p == pages.end() || p->first > page_id);
continue;
case page_recv_t::RECV_NOT_PROCESSED:
- recv_read_in_area(page_id, p);
+ recv_read_in_area(page_id, p, last_batch);
}
p= pages.lower_bound(page_id);
/* Ensure that progress will be made. */
@@ -3607,14 +3514,8 @@ next_free_block:
}
}
- if (last_batch)
- /* We skipped this in buf_page_create(). */
- mlog_init.mark_ibuf_exist(mtr);
- else
- {
- mlog_init.reset();
+ if (!last_batch)
log_sys.latch.wr_unlock();
- }
mysql_mutex_unlock(&mutex);
@@ -4330,7 +4231,6 @@ err_exit:
mysql_mutex_lock(&recv_sys.mutex);
recv_sys.apply_log_recs = true;
- recv_no_ibuf_operations = false;
ut_d(recv_no_log_write = srv_operation == SRV_OPERATION_RESTORE
|| srv_operation == SRV_OPERATION_RESTORE_EXPORT);
if (srv_operation == SRV_OPERATION_NORMAL) {
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index b866460feb5..6d31a55e8ed 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -177,7 +177,6 @@ void mtr_t::start()
m_made_dirty= false;
m_latch_ex= false;
- m_inside_ibuf= false;
m_modifications= false;
m_log_mode= MTR_LOG_ALL;
ut_d(m_user_space_id= TRX_SYS_SPACE);
@@ -309,7 +308,6 @@ void mtr_t::release()
void mtr_t::commit()
{
ut_ad(is_active());
- ut_ad(!is_inside_ibuf());
/* This is a dirty read, for debugging. */
ut_ad(!m_modifications || !recv_no_log_write);
@@ -494,7 +492,6 @@ void mtr_t::rollback_to_savepoint(ulint begin, ulint end)
void mtr_t::commit_shrink(fil_space_t &space)
{
ut_ad(is_active());
- ut_ad(!is_inside_ibuf());
ut_ad(!high_level_read_only);
ut_ad(m_modifications);
ut_ad(m_made_dirty);
@@ -601,7 +598,6 @@ void mtr_t::commit_shrink(fil_space_t &space)
bool mtr_t::commit_file(fil_space_t &space, const char *name)
{
ut_ad(is_active());
- ut_ad(!is_inside_ibuf());
ut_ad(!high_level_read_only);
ut_ad(m_modifications);
ut_ad(!m_made_dirty);
@@ -712,7 +708,6 @@ lsn_t mtr_t::commit_files(lsn_t checkpoint_lsn)
ut_ad(log_sys.latch.is_write_locked());
#endif
ut_ad(is_active());
- ut_ad(!is_inside_ibuf());
ut_ad(m_log_mode == MTR_LOG_ALL);
ut_ad(!m_made_dirty);
ut_ad(m_memo.empty());
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index d4cfb6207bf..98bf4fdb8ca 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -65,7 +65,9 @@ Created 10/21/1995 Heikki Tuuri
#endif /* HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE */
#ifdef _WIN32
-#include <winioctl.h>
+# include <winioctl.h>
+#elif !defined O_DSYNC
+# define O_DSYNC O_SYNC
#endif
// my_test_if_atomic_write() , my_win_secattr()
@@ -931,6 +933,8 @@ bool
os_file_flush_func(
os_file_t file)
{
+ if (UNIV_UNLIKELY(my_disable_sync)) return true;
+
int ret;
ret = os_file_sync_posix(file);
@@ -981,40 +985,19 @@ os_file_create_simple_func(
*success = false;
- int create_flag;
- const char* mode_str = NULL;
+ int create_flag = O_RDONLY;
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
- if (create_mode == OS_FILE_OPEN) {
- mode_str = "OPEN";
-
- if (access_type == OS_FILE_READ_ONLY) {
-
- create_flag = O_RDONLY;
-
- } else if (read_only) {
-
- create_flag = O_RDONLY;
-
- } else {
+ if (read_only) {
+ } else if (create_mode == OS_FILE_OPEN) {
+ if (access_type != OS_FILE_READ_ONLY) {
create_flag = O_RDWR;
}
-
- } else if (read_only) {
-
- mode_str = "OPEN";
- create_flag = O_RDONLY;
-
} else if (create_mode == OS_FILE_CREATE) {
-
- mode_str = "CREATE";
create_flag = O_RDWR | O_CREAT | O_EXCL;
-
} else if (create_mode == OS_FILE_CREATE_PATH) {
-
- mode_str = "CREATE PATH";
/* Create subdirs along the path if needed. */
*success = os_file_create_subdirs_if_needed(name);
@@ -1040,40 +1023,38 @@ os_file_create_simple_func(
return(OS_FILE_CLOSED);
}
- bool retry;
+ create_flag |= O_CLOEXEC;
+ if (fil_system.is_write_through()) create_flag |= O_DSYNC;
+#ifdef O_DIRECT
+ int direct_flag = fil_system.is_buffered() ? 0 : O_DIRECT;
+#else
+ constexpr int direct_flag = 0;
+#endif
- do {
- file = open(name, create_flag | O_CLOEXEC, os_innodb_umask);
+ for (;;) {
+ file = open(name, create_flag | direct_flag, os_innodb_umask);
if (file == -1) {
+#ifdef O_DIRECT
+ if (direct_flag && errno == EINVAL) {
+ direct_flag = 0;
+ continue;
+ }
+#endif
+
*success = false;
- retry = os_file_handle_error(
+ if (!os_file_handle_error(
name,
create_mode == OS_FILE_OPEN
- ? "open" : "create");
+ ? "open" : "create")) {
+ break;
+ }
} else {
*success = true;
- retry = false;
- }
-
- } while (retry);
-
- /* This function is always called for data files, we should disable
- OS caching (O_DIRECT) here as we do in os_file_create_func(), so
- we open the same file in the same mode, see man page of open(2). */
- if (!srv_read_only_mode && *success) {
- switch (srv_file_flush_method) {
- case SRV_O_DSYNC:
- case SRV_O_DIRECT:
- case SRV_O_DIRECT_NO_FSYNC:
- os_file_set_nocache(file, name, mode_str);
- break;
- default:
break;
}
}
-#ifndef _WIN32
if (!read_only
&& *success
&& access_type == OS_FILE_READ_WRITE
@@ -1084,7 +1065,6 @@ os_file_create_simple_func(
close(file);
file = -1;
}
-#endif /* !_WIN32 */
return(file);
}
@@ -1156,8 +1136,10 @@ os_file_create_func(
return(OS_FILE_CLOSED);
);
- int create_flag;
- const char* mode_str = NULL;
+ int create_flag = O_RDONLY | O_CLOEXEC;
+#ifdef O_DIRECT
+ const char* mode_str = "OPEN";
+#endif
on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT
? true : false;
@@ -1167,30 +1149,21 @@ os_file_create_func(
create_mode &= ulint(~(OS_FILE_ON_ERROR_NO_EXIT
| OS_FILE_ON_ERROR_SILENT));
- if (create_mode == OS_FILE_OPEN
- || create_mode == OS_FILE_OPEN_RAW
- || create_mode == OS_FILE_OPEN_RETRY) {
-
- mode_str = "OPEN";
-
- create_flag = read_only ? O_RDONLY : O_RDWR;
-
- } else if (read_only) {
-
- mode_str = "OPEN";
-
- create_flag = O_RDONLY;
-
+ if (read_only) {
+ } else if (create_mode == OS_FILE_OPEN
+ || create_mode == OS_FILE_OPEN_RAW
+ || create_mode == OS_FILE_OPEN_RETRY) {
+ create_flag = O_RDWR | O_CLOEXEC;
} else if (create_mode == OS_FILE_CREATE) {
-
+#ifdef O_DIRECT
mode_str = "CREATE";
- create_flag = O_RDWR | O_CREAT | O_EXCL;
-
+#endif
+ create_flag = O_RDWR | O_CREAT | O_EXCL | O_CLOEXEC;
} else if (create_mode == OS_FILE_OVERWRITE) {
-
+#ifdef O_DIRECT
mode_str = "OVERWRITE";
- create_flag = O_RDWR | O_CREAT | O_TRUNC;
-
+#endif
+ create_flag = O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC;
} else {
ib::error()
<< "Unknown file create mode (" << create_mode << ")"
@@ -1205,25 +1178,36 @@ os_file_create_func(
ut_a(purpose == OS_FILE_AIO || purpose == OS_FILE_NORMAL);
- /* We let O_DSYNC only affect log files */
+ create_flag |= O_CLOEXEC;
- if (!read_only
- && type == OS_LOG_FILE
- && srv_file_flush_method == SRV_O_DSYNC) {
-#ifdef O_DSYNC
- create_flag |= O_DSYNC;
+#ifdef O_DIRECT
+ int direct_flag = type == OS_DATA_FILE && create_mode != OS_FILE_CREATE
+ && !fil_system.is_buffered()
+ ? O_DIRECT : 0;
#else
- create_flag |= O_SYNC;
+ constexpr int direct_flag = 0;
#endif
+
+ if (read_only) {
+ } else if ((type == OS_LOG_FILE)
+ ? log_sys.log_write_through
+ : fil_system.is_write_through()) {
+ create_flag |= O_DSYNC;
}
os_file_t file;
- bool retry;
- do {
- file = open(name, create_flag | O_CLOEXEC, os_innodb_umask);
+ for (;;) {
+ file = open(name, create_flag | direct_flag, os_innodb_umask);
if (file == -1) {
+#ifdef O_DIRECT
+ if (direct_flag && errno == EINVAL) {
+ direct_flag = 0;
+ continue;
+ }
+#endif
+
const char* operation;
operation = (create_mode == OS_FILE_CREATE
@@ -1232,39 +1216,30 @@ os_file_create_func(
*success = false;
if (on_error_no_exit) {
- retry = os_file_handle_error_no_exit(
- name, operation, on_error_silent);
+ if (os_file_handle_error_no_exit(
+ name, operation, on_error_silent))
+ continue;
} else {
- retry = os_file_handle_error(name, operation);
+ if (os_file_handle_error(name, operation))
+ continue;
}
+
+ return file;
} else {
*success = true;
- retry = false;
+ break;
}
-
- } while (retry);
-
- if (!*success) {
- return file;
}
#if (defined __sun__ && defined DIRECTIO_ON) || defined O_DIRECT
- if (type == OS_DATA_FILE) {
- switch (srv_file_flush_method) {
- case SRV_O_DSYNC:
- case SRV_O_DIRECT:
- case SRV_O_DIRECT_NO_FSYNC:
+ if (type == OS_DATA_FILE && create_mode == OS_FILE_CREATE
+ && !fil_system.is_buffered()) {
# ifdef __linux__
use_o_direct:
# endif
- os_file_set_nocache(file, name, mode_str);
- break;
- default:
- break;
- }
- }
+ os_file_set_nocache(file, name, mode_str);
# ifdef __linux__
- else if (type == OS_LOG_FILE && !log_sys.is_opened()) {
+ } else if (type == OS_LOG_FILE && !log_sys.is_opened()) {
struct stat st;
char b[20 + sizeof "/sys/dev/block/" ":"
"/../queue/physical_block_size"];
@@ -1316,11 +1291,10 @@ skip_o_direct:
log_sys.log_buffered= true;
log_sys.set_block_size(512);
}
- }
# endif
+ }
#endif
-#ifndef _WIN32
if (!read_only
&& create_mode != OS_FILE_OPEN_RAW
&& !my_disable_locking
@@ -1348,7 +1322,6 @@ skip_o_direct:
close(file);
file = -1;
}
-#endif /* !_WIN32 */
return(file);
}
@@ -1786,6 +1759,9 @@ Flushes the write buffers of a given file to the disk.
@return true if success */
bool os_file_flush_func(os_file_t file)
{
+ if (UNIV_UNLIKELY(my_disable_sync))
+ return true;
+
++os_n_fsyncs;
static bool disable_datasync;
@@ -2011,6 +1987,11 @@ os_file_create_simple_func(
return(OS_FILE_CLOSED);
}
+ if (fil_system.is_write_through())
+ attributes |= FILE_FLAG_WRITE_THROUGH;
+ if (!fil_system.is_buffered())
+ attributes |= FILE_FLAG_NO_BUFFERING;
+
bool retry;
do {
@@ -2182,27 +2163,16 @@ os_file_create_func(
if (!log_sys.is_opened() && !log_sys.log_buffered) {
attributes|= FILE_FLAG_NO_BUFFERING;
}
- if (srv_file_flush_method == SRV_O_DSYNC)
+ if (log_sys.log_write_through)
attributes|= FILE_FLAG_WRITE_THROUGH;
- }
- else if (type == OS_DATA_FILE)
- {
- switch (srv_file_flush_method)
- {
- case SRV_FSYNC:
- case SRV_LITTLESYNC:
- case SRV_NOSYNC:
- break;
- default:
+ } else {
+ if (type == OS_DATA_FILE && !fil_system.is_buffered())
attributes|= FILE_FLAG_NO_BUFFERING;
- }
+ if (fil_system.is_write_through())
+ attributes|= FILE_FLAG_WRITE_THROUGH;
}
- DWORD access = GENERIC_READ;
-
- if (!read_only) {
- access |= GENERIC_WRITE;
- }
+ DWORD access = read_only ? GENERIC_READ : GENERIC_READ | GENERIC_WRITE;
for (;;) {
const char *operation;
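The POSIX branches above now derive O_DSYNC and O_DIRECT from fil_system.is_write_through() and fil_system.is_buffered() instead of srv_file_flush_method, and they retry the open() without O_DIRECT when the kernel rejects that flag with EINVAL. A minimal standalone sketch of the retry pattern (a hypothetical helper, not code from this patch; the real functions also handle create modes, O_CLOEXEC, write-through and error reporting):

    #include <sys/types.h>
    #include <sys/stat.h>
    #include <fcntl.h>
    #include <errno.h>

    /* Open a file, preferring O_DIRECT but falling back to a buffered
       open if the file system rejects the flag with EINVAL. */
    static int open_maybe_direct(const char *name, int base_flags, mode_t mode)
    {
    #ifdef O_DIRECT
      int direct_flag = O_DIRECT;
    #else
      int direct_flag = 0;
    #endif
      for (;;) {
        int fd = open(name, base_flags | direct_flag, mode);
        if (fd != -1 || !direct_flag || errno != EINVAL)
          return fd;        /* success, or a failure unrelated to O_DIRECT */
        direct_flag = 0;    /* EINVAL: retry once without O_DIRECT */
      }
    }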
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index b019694b9f6..8d3a44d630d 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2018, 2022, MariaDB Corporation.
+Copyright (c) 2018, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1369,8 +1369,7 @@ page_cur_insert_rec_low(
ut_ad(!!page_is_comp(block->page.frame) == !!rec_offs_comp(offsets));
ut_ad(fil_page_index_page_check(block->page.frame));
ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + block->page.frame) ==
- index->id ||
- mtr->is_inside_ibuf());
+ index->id || index->is_dummy);
ut_ad(page_dir_get_n_slots(block->page.frame) >= 2);
ut_ad(!page_rec_is_supremum(cur->rec));
@@ -1769,11 +1768,6 @@ static inline void page_zip_dir_add_slot(buf_block_t *block,
Inserts a record next to page cursor on a compressed and uncompressed
page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if this is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to inserted record
@return nullptr on failure */
rec_t*
@@ -1797,8 +1791,7 @@ page_cur_insert_rec_zip(
ut_ad(rec_offs_comp(offsets));
ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX ||
fil_page_get_type(page) == FIL_PAGE_RTREE);
- ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + page) ==
- index->id || mtr->is_inside_ibuf());
+ ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + page) == index->id);
ut_ad(!page_get_instant(page));
ut_ad(!page_cur_is_after_last(cursor));
#ifdef UNIV_ZIP_DEBUG
@@ -2265,8 +2258,7 @@ page_cur_delete_rec(
== index->table->not_redundant());
ut_ad(fil_page_index_page_check(block->page.frame));
ut_ad(mach_read_from_8(PAGE_HEADER + PAGE_INDEX_ID + block->page.frame)
- == index->id
- || mtr->is_inside_ibuf());
+ == index->id);
ut_ad(mtr->is_named_space(index->table->space));
/* The record must not be the supremum or infimum record. */
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index 258d47a5451..1060e702db4 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -336,17 +336,13 @@ page_create_zip(
/* PAGE_MAX_TRX_ID or PAGE_ROOT_AUTO_INC are always 0 for
temporary tables. */
ut_ad(max_trx_id == 0 || !index->table->is_temporary());
- /* In secondary indexes and the change buffer, PAGE_MAX_TRX_ID
+ /* In secondary indexes, PAGE_MAX_TRX_ID
must be zero on non-leaf pages. max_trx_id can be 0 when the
- index consists of an empty root (leaf) page. */
- ut_ad(max_trx_id == 0
- || level == 0
- || !dict_index_is_sec_or_ibuf(index)
- || index->table->is_temporary());
- /* In the clustered index, PAGE_ROOT_AUTOINC or
+ index consists of an empty root (leaf) page.
+
+ In the clustered index, PAGE_ROOT_AUTOINC or
PAGE_MAX_TRX_ID must be 0 on other pages than the root. */
- ut_ad(level == 0 || max_trx_id == 0
- || !dict_index_is_sec_or_ibuf(index)
+ ut_ad(max_trx_id == 0 || level == 0 || index->is_primary()
|| index->table->is_temporary());
buf_block_modify_clock_inc(block);
@@ -390,8 +386,7 @@ page_create_empty(
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
- if (dict_index_is_sec_or_ibuf(index)
- && !index->table->is_temporary()
+ if (!index->is_primary() && !index->table->is_temporary()
&& page_is_leaf(block->page.frame)) {
max_trx_id = page_get_max_trx_id(block->page.frame);
ut_ad(max_trx_id);
@@ -435,11 +430,6 @@ page_create_empty(
Differs from page_copy_rec_list_end, because this function does not
touch the lock table and max trx id on page or compress the page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return error code */
dberr_t
page_copy_rec_list_end_no_locks(
@@ -507,11 +497,6 @@ Copies records from page to new_page, from a given record onward,
including that record. Infimum and supremum records are not copied.
The records are copied to the start of the record list on new_page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_t::commit().
-
@return pointer to the original successor of the infimum record on new_block
@retval nullptr on ROW_FORMAT=COMPRESSED page overflow */
rec_t*
@@ -603,8 +588,7 @@ err_exit:
same temp-table in parallel.
max_trx_id is ignored for temp tables because it not required
for MVCC. */
- if (dict_index_is_sec_or_ibuf(index)
- && page_is_leaf(page)
+ if (!index->is_primary() && page_is_leaf(page)
&& !index->table->is_temporary()) {
ut_ad(!was_empty || page_dir_get_n_heap(new_page)
== PAGE_HEAP_NO_USER_LOW
@@ -677,11 +661,6 @@ Copies records from page to new_page, up to the given record,
NOT including that record. Infimum and supremum records are not copied.
The records are copied to the end of the record list on new_page.
-IMPORTANT: The caller will have to update IBUF_BITMAP_FREE
-if new_block is a compressed leaf page in a secondary index.
-This has to be done either within the same mini-transaction,
-or by invoking ibuf_reset_free_bits() before mtr_commit().
-
@return pointer to the original predecessor of the supremum record on new_block
@retval nullptr on ROW_FORMAT=COMPRESSED page overflow */
rec_t*
@@ -2057,7 +2036,7 @@ func_exit2:
max_trx_id is ignored for temp tables because it not required
for MVCC. */
if (!page_is_leaf(page) || page_is_empty(page)
- || !dict_index_is_sec_or_ibuf(index)
+ || index->is_primary()
|| index->table->is_temporary()) {
} else if (trx_id_t sys_max_trx_id = trx_sys.get_max_trx_id()) {
trx_id_t max_trx_id = page_get_max_trx_id(page);
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 56b58dd87d0..aff01764be6 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, 2022, MariaDB Corporation.
+Copyright (c) 2014, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -408,8 +408,6 @@ inline void mtr_t::zmemcpy(const buf_block_t &b, void *dest, const void *str,
static void page_zip_compress_write_log(buf_block_t *block,
dict_index_t *index, mtr_t *mtr)
{
- ut_ad(!index->is_ibuf());
-
if (!mtr->is_logged())
return;
@@ -463,8 +461,7 @@ page_zip_get_n_prev_extern(
ut_ad(page_is_leaf(page));
ut_ad(page_is_comp(page));
ut_ad(dict_table_is_comp(index->table));
- ut_ad(dict_index_is_clust(index));
- ut_ad(!dict_index_is_ibuf(index));
+ ut_ad(index->is_primary());
heap_no = rec_get_heap_no_new(rec);
ut_ad(heap_no >= PAGE_HEAP_NO_USER_LOW);
@@ -1282,7 +1279,6 @@ page_zip_compress(
ut_ad(page_simple_validate_new((page_t*) page));
ut_ad(page_zip_simple_validate(page_zip));
ut_ad(dict_table_is_comp(index->table));
- ut_ad(!dict_index_is_ibuf(index));
MEM_CHECK_DEFINED(page, srv_page_size);
@@ -4374,10 +4370,6 @@ Reorganize and compress a page. This is a low-level operation for
compressed pages, to be used when page_zip_compress() fails.
On success, redo log will be written.
The function btr_page_reorganize() should be preferred whenever possible.
-IMPORTANT: if page_zip_reorganize() is invoked on a leaf page of a
-non-clustered index, the caller must update the insert buffer free
-bits in the same mini-transaction in such a way that the modification
-will be redo-logged.
@return error code
@retval DB_FAIL on overflow; the block_zip will be left intact */
dberr_t
@@ -4398,7 +4390,6 @@ page_zip_reorganize(
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(block->page.zip.data);
ut_ad(page_is_comp(page));
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(!index->table->is_temporary());
/* Note that page_zip_validate(page_zip, page, index) may fail here. */
MEM_CHECK_DEFINED(page, srv_page_size);
@@ -4505,7 +4496,6 @@ page_zip_copy_recs(
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_page_flagged(src, MTR_MEMO_PAGE_X_FIX));
- ut_ad(!dict_index_is_ibuf(index));
ut_ad(!index->table->is_temporary());
#ifdef UNIV_ZIP_DEBUG
/* The B-tree operations that call this function may set
diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc
index c2b2bc7120d..e48cad01530 100644
--- a/storage/innobase/rem/rem0cmp.cc
+++ b/storage/innobase/rem/rem0cmp.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2020, 2022, MariaDB Corporation.
+Copyright (c) 2020, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -401,8 +401,8 @@ int cmp_dtuple_rec_with_match_low(const dtuple_t *dtuple, const rec_t *rec,
ut_ad(!dfield_is_ext(dtuple_field));
- ret = cmp_data(type->mtype, type->prtype, !index->is_ibuf()
- && index->fields[cur_field].descending,
+ ret = cmp_data(type->mtype, type->prtype,
+ index->fields[cur_field].descending,
dtuple_b_ptr, dtuple_f_len,
rec_b_ptr, rec_f_len);
if (ret) {
@@ -480,7 +480,6 @@ cmp_dtuple_rec_with_match_bytes(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!(REC_INFO_MIN_REC_FLAG
& dtuple_get_info_bits(dtuple)));
- ut_ad(!index->is_ibuf());
if (UNIV_UNLIKELY(REC_INFO_MIN_REC_FLAG
& rec_get_info_bits(rec, rec_offs_comp(offsets)))) {
@@ -832,32 +831,21 @@ cmp_rec_rec(
dict_index_get_n_unique_in_tree(index));
for (; cur_field < n_fields; cur_field++) {
- ulint mtype;
- ulint prtype;
- bool descending;
-
- if (UNIV_UNLIKELY(dict_index_is_ibuf(index))) {
- /* This is for the insert buffer B-tree. */
- mtype = DATA_BINARY;
+ const dict_field_t* field = dict_index_get_nth_field(
+ index, cur_field);
+ bool descending = field->descending;
+ ulint mtype = field->col->mtype;
+ ulint prtype = field->col->prtype;
+
+ if (UNIV_LIKELY(!index->is_spatial())) {
+ } else if (cur_field == 0) {
+ ut_ad(DATA_GEOMETRY_MTYPE(mtype));
+ prtype |= DATA_GIS_MBR;
+ } else if (!page_rec_is_leaf(rec2)) {
+ /* Compare the child page number. */
+ ut_ad(cur_field == 1);
+ mtype = DATA_SYS_CHILD;
prtype = 0;
- descending = false;
- } else {
- const dict_field_t* field = dict_index_get_nth_field(
- index, cur_field);
- descending = field->descending;
- mtype = field->col->mtype;
- prtype = field->col->prtype;
-
- if (UNIV_LIKELY(!dict_index_is_spatial(index))) {
- } else if (cur_field == 0) {
- ut_ad(DATA_GEOMETRY_MTYPE(mtype));
- prtype |= DATA_GIS_MBR;
- } else if (!page_rec_is_leaf(rec2)) {
- /* Compare the child page number. */
- ut_ad(cur_field == 1);
- mtype = DATA_SYS_CHILD;
- prtype = 0;
- }
}
/* We should never encounter an externally stored field.
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index 98cf2dda900..f489669b408 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -476,7 +476,7 @@ rec_offs_make_valid(
const bool is_alter_metadata = leaf
&& rec_is_alter_metadata(rec, *index);
ut_ad((leaf && rec_is_metadata(rec, *index))
- || index->is_dummy || index->is_ibuf()
+ || index->is_dummy
|| (leaf
? rec_offs_n_fields(offsets)
<= dict_index_get_n_fields(index)
@@ -878,18 +878,15 @@ rec_get_offsets_func(
/* The infimum and supremum records carry 1 field. */
ut_ad(is_user_rec || n == 1);
ut_ad(!is_user_rec || n_core || index->is_dummy
- || dict_index_is_ibuf(index)
|| n == n_fields /* dict_stats_analyze_index_level() */
|| n - 1
== dict_index_get_n_unique_in_tree_nonleaf(index));
ut_ad(!is_user_rec || !n_core || index->is_dummy
- || dict_index_is_ibuf(index)
|| n == n_fields /* btr_pcur_restore_position() */
|| (n + (index->id == DICT_INDEXES_ID) >= n_core));
if (is_user_rec && n_core && n < index->n_fields) {
ut_ad(!index->is_dummy);
- ut_ad(!dict_index_is_ibuf(index));
n = index->n_fields;
}
}
@@ -1968,7 +1965,7 @@ rec_copy_prefix_to_buf(
or NULL */
ulint* buf_size) /*!< in/out: buffer size */
{
- ut_ad(n_fields <= index->n_fields || dict_index_is_ibuf(index));
+ ut_ad(n_fields <= index->n_fields);
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
UNIV_PREFETCH_RW(*buf);
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 2dcc16130c3..5d7ea475d43 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -2074,7 +2074,7 @@ dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW
we no longer evict the pages on DISCARD TABLESPACE. */
buf_page_get_low(block->page.id(), get_zip_size(), RW_NO_LATCH,
nullptr, BUF_PEEK_IF_IN_POOL,
- nullptr, nullptr, false);
+ nullptr, nullptr);
uint16_t page_type;
@@ -2112,8 +2112,9 @@ row_import_cleanup(
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from handler */
dberr_t err) /*!< in: error code */
{
+ dict_table_t* table = prebuilt->table;
+
if (err != DB_SUCCESS) {
- dict_table_t* table = prebuilt->table;
table->file_unreadable = true;
if (table->space) {
fil_close_tablespace(table->space_id);
@@ -2144,7 +2145,25 @@ row_import_cleanup(
DBUG_EXECUTE_IF("ib_import_before_checkpoint_crash", DBUG_SUICIDE(););
- return(err);
+ if (err != DB_SUCCESS
+ || !dict_table_get_first_index(table)->is_gen_clust()) {
+ return err;
+ }
+
+ btr_cur_t cur;
+ mtr_t mtr;
+ mtr.start();
+ err = cur.open_leaf(false, dict_table_get_first_index(table),
+ BTR_SEARCH_LEAF, &mtr);
+ if (err != DB_SUCCESS) {
+ } else if (const rec_t *rec =
+ page_rec_get_prev(btr_cur_get_rec(&cur))) {
+ if (page_rec_is_user_rec(rec))
+ table->row_id= mach_read_from_6(rec);
+ }
+ mtr.commit();
+
+ return err;
}
/*****************************************************************//**
@@ -2280,55 +2299,6 @@ row_import_adjust_root_pages_of_secondary_indexes(
}
/*****************************************************************//**
-Ensure that dict_sys.row_id exceeds SELECT MAX(DB_ROW_ID). */
-MY_ATTRIBUTE((nonnull)) static
-void
-row_import_set_sys_max_row_id(
-/*==========================*/
- row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from
- handler */
- const dict_table_t* table) /*!< in: table to import */
-{
- const rec_t* rec;
- mtr_t mtr;
- btr_pcur_t pcur;
- row_id_t row_id = 0;
- dict_index_t* index;
-
- index = dict_table_get_first_index(table);
- ut_ad(index->is_primary());
- ut_ad(dict_index_is_auto_gen_clust(index));
-
- mtr_start(&mtr);
-
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
-
- if (pcur.open_leaf(false, index, BTR_SEARCH_LEAF, &mtr)
- == DB_SUCCESS) {
- rec = btr_pcur_move_to_prev_on_page(&pcur);
-
- if (!rec) {
- /* The table is corrupted. */
- } else if (page_rec_is_infimum(rec)) {
- /* The table is empty. */
- } else if (rec_is_metadata(rec, *index)) {
- /* The clustered index contains the metadata
- record only, that is, the table is empty. */
- } else {
- row_id = mach_read_from_6(rec);
- }
- }
-
- mtr_commit(&mtr);
-
- if (row_id) {
- /* Update the system row id if the imported index row id is
- greater than the max system row id. */
- dict_sys.update_row_id(row_id);
- }
-}
-
-/*****************************************************************//**
Read the a string from the meta data file.
@return DB_SUCCESS or error code. */
static
@@ -4259,8 +4229,6 @@ row_import_for_mysql(
ut_ad(trx->state == TRX_STATE_ACTIVE);
ut_ad(!table->is_readable());
- ibuf_delete_for_discarded_space(table->space_id);
-
/* Assign an undo segment for the transaction, so that the
transaction will be recovered after a crash. */
@@ -4459,12 +4427,6 @@ row_import_for_mysql(
ut_free(filepath);
- if (err == DB_SUCCESS) {
- err = ibuf_check_bitmap_on_import(trx, table->space);
- }
-
- DBUG_EXECUTE_IF("ib_import_check_bitmap_failure", err = DB_CORRUPTION;);
-
if (err != DB_SUCCESS) {
return row_import_cleanup(prebuilt, err);
}
@@ -4521,13 +4483,6 @@ row_import_for_mysql(
return row_import_error(prebuilt, err);
}
- /* Ensure that the next available DB_ROW_ID is not smaller than
- any DB_ROW_ID stored in the table. */
-
- if (prebuilt->clust_index_was_generated) {
- row_import_set_sys_max_row_id(prebuilt, table);
- }
-
ib::info() << "Phase III - Flush changes to disk";
/* Ensure that all pages dirtied during the IMPORT make it to disk.
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 4be1aa6c82c..24fb6eb39ce 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -31,7 +31,6 @@ Created 4/20/1996 Heikki Tuuri
#include "btr0btr.h"
#include "btr0cur.h"
#include "mach0data.h"
-#include "ibuf0ibuf.h"
#include "que0que.h"
#include "row0upd.h"
#include "row0sel.h"
@@ -2732,8 +2731,6 @@ err_exit:
page_set_autoinc(root, auto_inc, &mtr, false);
}
- btr_pcur_get_btr_cur(&pcur)->thr = thr;
-
#ifdef UNIV_DEBUG
{
page_t* page = btr_pcur_get_page(&pcur);
@@ -3008,7 +3005,6 @@ row_ins_sec_index_entry_low(
ut_ad(!dict_index_is_clust(index));
ut_ad(mode == BTR_MODIFY_LEAF || mode == BTR_INSERT_TREE);
- cursor.thr = thr;
cursor.rtr_info = NULL;
cursor.page_cur.index = index;
ut_ad(thr_get_trx(thr)->id != 0);
@@ -3030,9 +3026,10 @@ row_ins_sec_index_entry_low(
if (index->is_spatial()) {
rtr_init_rtr_info(&rtr_info, false, &cursor, index, false);
+ rtr_info.thr = thr;
rtr_info_update_btr(&cursor, &rtr_info);
- err = rtr_insert_leaf(&cursor, entry, search_mode, &mtr);
+ err = rtr_insert_leaf(&cursor, thr, entry, search_mode, &mtr);
if (err == DB_SUCCESS && search_mode == BTR_MODIFY_LEAF
&& rtr_info.mbr_adj) {
@@ -3041,6 +3038,7 @@ row_ins_sec_index_entry_low(
rtr_clean_rtr_info(&rtr_info, true);
rtr_init_rtr_info(&rtr_info, false, &cursor,
index, false);
+ rtr_info.thr = thr;
rtr_info_update_btr(&cursor, &rtr_info);
mtr.start();
if (index->table->is_temporary()) {
@@ -3048,7 +3046,7 @@ row_ins_sec_index_entry_low(
} else {
index->set_modified(mtr);
}
- err = rtr_insert_leaf(&cursor, entry,
+ err = rtr_insert_leaf(&cursor, thr, entry,
search_mode, &mtr);
}
@@ -3057,14 +3055,6 @@ row_ins_sec_index_entry_low(
goto func_exit;});
} else {
- if (!index->table->is_temporary()) {
- search_mode = btr_latch_mode(
- search_mode
- | (thr_get_trx(thr)->check_unique_secondary
- ? BTR_INSERT | BTR_IGNORE_SEC_UNIQUE
- : BTR_INSERT));
- }
-
err = cursor.search_leaf(entry, PAGE_CUR_LE, search_mode,
&mtr);
}
@@ -3076,12 +3066,6 @@ row_ins_sec_index_entry_low(
goto func_exit;
}
- if (cursor.flag == BTR_CUR_INSERT_TO_IBUF) {
- ut_ad(!dict_index_is_spatial(index));
- /* The insert was buffered during the search: we are done */
- goto func_exit;
- }
-
#ifdef UNIV_DEBUG
{
page_t* page = btr_cur_get_page(&cursor);
@@ -3141,13 +3125,9 @@ row_ins_sec_index_entry_low(
locked with s-locks the necessary records to
prevent any insertion of a duplicate by another
transaction. Let us now reposition the cursor and
- continue the insertion (bypassing the change buffer). */
- err = cursor.search_leaf(
- entry, PAGE_CUR_LE,
- btr_latch_mode(search_mode
- & ~(BTR_INSERT
- | BTR_IGNORE_SEC_UNIQUE)),
- &mtr);
+ continue the insertion. */
+ err = cursor.search_leaf(entry, PAGE_CUR_LE, search_mode,
+ &mtr);
if (err != DB_SUCCESS) {
goto func_exit;
}
@@ -3378,11 +3358,6 @@ row_ins_sec_index_entry(
if (err == DB_FAIL) {
mem_heap_empty(heap);
- if (index->table->space == fil_system.sys_space
- && !(index->type & (DICT_UNIQUE | DICT_SPATIAL))) {
- ibuf_free_excess_pages();
- }
-
/* Try then pessimistic descent to the B-tree */
log_free_check();
@@ -3606,19 +3581,6 @@ row_ins_index_entry_step(
}
/***********************************************************//**
-Allocates a row id for row and inits the node->index field. */
-UNIV_INLINE
-void
-row_ins_alloc_row_id_step(
-/*======================*/
- ins_node_t* node) /*!< in: row insert node */
-{
- ut_ad(node->state == INS_NODE_ALLOC_ROW_ID);
- if (dict_table_get_first_index(node->table)->is_gen_clust())
- dict_sys_write_row_id(node->sys_buf, dict_sys.get_new_row_id());
-}
-
-/***********************************************************//**
Gets a row to insert from the values list. */
UNIV_INLINE
void
@@ -3698,13 +3660,18 @@ row_ins(
DBUG_PRINT("row_ins", ("table: %s", node->table->name.m_name));
if (node->state == INS_NODE_ALLOC_ROW_ID) {
-
- row_ins_alloc_row_id_step(node);
-
node->index = dict_table_get_first_index(node->table);
ut_ad(node->entry_list.empty() == false);
node->entry = node->entry_list.begin();
+ if (node->index->is_gen_clust()) {
+ const uint64_t db_row_id{++node->table->row_id};
+ if (db_row_id >> 48) {
+ DBUG_RETURN(DB_OUT_OF_FILE_SPACE);
+ }
+ mach_write_to_6(node->sys_buf, db_row_id);
+ }
+
if (node->ins_type == INS_SEARCHED) {
row_ins_get_row_from_select(node);
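The new code in row_ins() above replaces the dict_sys row-id allocation step with a per-table counter: a generated DB_ROW_ID is limited to 48 bits, an increment that would overflow that width is reported as DB_OUT_OF_FILE_SPACE, and otherwise mach_write_to_6() stores the value into node->sys_buf. A self-contained sketch of the same bounds check and 6-byte big-endian store (hypothetical helper names, not the InnoDB functions):

    #include <cstdint>
    #include <cassert>

    /* Stand-in for mach_write_to_6(): store the 48 low bits of id into
       6 bytes, most significant byte first (InnoDB's on-disk order). */
    static void write_6_bytes(unsigned char *b, uint64_t id)
    {
      assert(!(id >> 48));              /* the caller guarantees the value fits */
      for (int i = 5; i >= 0; i--) {
        b[i] = static_cast<unsigned char>(id);
        id >>= 8;
      }
    }

    /* Mirrors the check added above: refuse a row id wider than 48 bits. */
    static bool assign_row_id(uint64_t &counter, unsigned char sys_buf[6])
    {
      const uint64_t db_row_id = ++counter;
      if (db_row_id >> 48)
        return false;                   /* maps to DB_OUT_OF_FILE_SPACE */
      write_6_bytes(sys_buf, db_row_id);
      return true;
    }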
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 3302bf934da..010b347c003 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -1701,22 +1701,6 @@ err_exit:
if (error) {
goto err_exit;
}
-#ifdef UNIV_DEBUG
- switch (btr_pcur_get_btr_cur(pcur)->flag) {
- case BTR_CUR_DELETE_REF:
- case BTR_CUR_DEL_MARK_IBUF:
- case BTR_CUR_DELETE_IBUF:
- case BTR_CUR_INSERT_TO_IBUF:
- /* We did not request buffering. */
- break;
- case BTR_CUR_HASH:
- case BTR_CUR_HASH_FAIL:
- case BTR_CUR_BINARY:
- goto flag_ok;
- }
- ut_ad(0);
-flag_ok:
-#endif /* UNIV_DEBUG */
if (page_rec_is_infimum(btr_pcur_get_rec(pcur))
|| btr_pcur_get_low_match(pcur) < index->n_uniq) {
@@ -1724,8 +1708,8 @@ flag_ok:
found, because new_table is being modified by
this thread only, and all indexes should be
updated in sync. */
- mtr->commit();
- return(DB_INDEX_CORRUPT);
+ error = DB_INDEX_CORRUPT;
+ goto err_exit;
}
btr_cur_pessimistic_delete(&error, FALSE,
@@ -1785,22 +1769,6 @@ row_log_table_apply_delete(
if (err != DB_SUCCESS) {
goto all_done;
}
-#ifdef UNIV_DEBUG
- switch (btr_pcur_get_btr_cur(&pcur)->flag) {
- case BTR_CUR_DELETE_REF:
- case BTR_CUR_DEL_MARK_IBUF:
- case BTR_CUR_DELETE_IBUF:
- case BTR_CUR_INSERT_TO_IBUF:
- /* We did not request buffering. */
- break;
- case BTR_CUR_HASH:
- case BTR_CUR_HASH_FAIL:
- case BTR_CUR_BINARY:
- goto flag_ok;
- }
- ut_ad(0);
-flag_ok:
-#endif /* UNIV_DEBUG */
if (page_rec_is_infimum(btr_pcur_get_rec(&pcur))
|| btr_pcur_get_low_match(&pcur) < index->n_uniq) {
@@ -1934,19 +1902,6 @@ func_exit_committed:
return error;
}
-#ifdef UNIV_DEBUG
- switch (btr_pcur_get_btr_cur(&pcur)->flag) {
- case BTR_CUR_DELETE_REF:
- case BTR_CUR_DEL_MARK_IBUF:
- case BTR_CUR_DELETE_IBUF:
- case BTR_CUR_INSERT_TO_IBUF:
- ut_ad(0);/* We did not request buffering. */
- case BTR_CUR_HASH:
- case BTR_CUR_HASH_FAIL:
- case BTR_CUR_BINARY:
- break;
- }
-#endif /* UNIV_DEBUG */
ut_ad(!page_rec_is_infimum(btr_pcur_get_rec(&pcur))
&& btr_pcur_get_low_match(&pcur) >= index->n_uniq);
@@ -2096,8 +2051,17 @@ func_exit_committed:
ut_free(pcur.old_rec_buf);
pcur.old_rec_buf = nullptr;
- if (ROW_FOUND != row_search_index_entry(
- entry, BTR_MODIFY_TREE, &pcur, &mtr)) {
+ error = btr_pcur_open(entry, PAGE_CUR_LE, BTR_MODIFY_TREE,
+ &pcur, &mtr);
+
+ if (error != DB_SUCCESS) {
+ ut_ad(0);
+ break;
+ }
+
+ if (btr_pcur_is_before_first_on_page(&pcur)
+ || btr_pcur_get_low_match(&pcur)
+ != dtuple_get_n_fields(entry)) {
ut_ad(0);
error = DB_CORRUPTION;
break;
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 5601a786555..5a16b5d8ec4 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -150,7 +150,7 @@ public:
false);
rtr_info_update_btr(&ins_cur, &rtr_info);
- error = rtr_insert_leaf(&ins_cur, dtuple,
+ error = rtr_insert_leaf(&ins_cur, nullptr, dtuple,
BTR_MODIFY_LEAF, &mtr);
/* It need to update MBR in parent entry,
@@ -163,7 +163,8 @@ public:
rtr_info_update_btr(&ins_cur, &rtr_info);
mtr.start();
index->set_modified(mtr);
- error = rtr_insert_leaf(&ins_cur, dtuple,
+ error = rtr_insert_leaf(&ins_cur, nullptr,
+ dtuple,
BTR_MODIFY_TREE, &mtr);
}
@@ -186,7 +187,8 @@ public:
&ins_cur, index, false);
rtr_info_update_btr(&ins_cur, &rtr_info);
- error = rtr_insert_leaf(&ins_cur, dtuple,
+ error = rtr_insert_leaf(&ins_cur, nullptr,
+ dtuple,
BTR_MODIFY_TREE, &mtr);
if (error == DB_SUCCESS) {
@@ -2221,7 +2223,7 @@ end_of_index:
next_page_no),
old_table->space->zip_size(),
RW_S_LATCH, nullptr, BUF_GET, &mtr,
- &err, false);
+ &err);
if (!block) {
goto err_exit;
}
@@ -3709,8 +3711,6 @@ row_merge_mtuple_to_dtuple(
dtuple_t* dtuple,
const mtuple_t* mtuple)
{
- ut_ad(!dict_index_is_ibuf(index));
-
memcpy(dtuple->fields, mtuple->fields,
dtuple->n_fields * sizeof *mtuple->fields);
}
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 549d2745223..81879431096 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2000, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2022, MariaDB Corporation.
+Copyright (c) 2015, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -43,7 +43,6 @@ Created 9/17/2000 Heikki Tuuri
#include "fsp0file.h"
#include "fts0fts.h"
#include "fts0types.h"
-#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "log0log.h"
#include "pars0pars.h"
@@ -2334,12 +2333,7 @@ row_discard_tablespace(
2) Purge and rollback: we assign a new table id for the
table. Since purge and rollback look for the table based on
the table id, they see the table as 'dropped' and discard
- their operations.
-
- 3) Insert buffer: we remove all entries for the tablespace in
- the insert buffer tree. */
-
- ibuf_delete_for_discarded_space(table->space_id);
+ their operations. */
table_id_t new_id;
@@ -2442,9 +2436,8 @@ rollback:
/* Note: The following cannot be rolled back. Rollback would see the
UPDATE of SYS_INDEXES.TABLE_ID as two operations: DELETE and INSERT.
It would invoke btr_free_if_exists() when rolling back the INSERT,
- effectively dropping all indexes of the table. Furthermore, calls like
- ibuf_delete_for_discarded_space() are already discarding data
- before the transaction is committed.
+ effectively dropping all indexes of the table. Furthermore, we are
+ already discarding data before the transaction is committed.
It would be better to remove the integrity-breaking
ALTER TABLE...DISCARD TABLESPACE operation altogether. */
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 65d26e0a733..0a2647e8d6d 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -273,10 +273,10 @@ not delete marked version of a clustered index record where DB_TRX_ID
is newer than the purge view.
NOTE: This function should only be called by the purge thread, only
-while holding a latch on the leaf page of the secondary index entry
-(or keeping the buffer pool watch on the page). It is possible that
-this function first returns true and then false, if a user transaction
-inserts a record that the secondary index entry would refer to.
+while holding a latch on the leaf page of the secondary index entry.
+It is possible that this function first returns true and then false,
+if a user transaction inserts a record that the secondary index entry
+would refer to.
However, in that case, the user transaction would also re-insert the
secondary index entry after purge has removed it and released the leaf
page latch.
@@ -292,6 +292,7 @@ page latch.
@param[in] is_tree true=pessimistic purge,
false=optimistic (leaf-page only)
@return true if the secondary index record can be purged */
+static
bool
row_purge_poss_sec(
purge_node_t* node,
@@ -349,14 +350,11 @@ row_purge_remove_sec_if_poss_tree(
pcur.btr_cur.page_cur.index = index;
if (index->is_spatial()) {
- if (!rtr_search(entry, BTR_PURGE_TREE, &pcur, &mtr)) {
- goto found;
+ if (rtr_search(entry, BTR_PURGE_TREE, &pcur, nullptr, &mtr)) {
+ goto func_exit;
}
- goto func_exit;
- }
-
- switch (row_search_index_entry(entry, BTR_PURGE_TREE, &pcur, &mtr)) {
- case ROW_NOT_FOUND:
+ } else if (!row_search_index_entry(entry, BTR_PURGE_TREE,
+ &pcur, &mtr)) {
/* Not found. This is a legitimate condition. In a
rollback, InnoDB will remove secondary recs that would
be purged anyway. Then the actual purge will not find
@@ -366,25 +364,13 @@ row_purge_remove_sec_if_poss_tree(
index, it will remove it. Then if/when the purge
comes to consider the secondary index record a second
time, it will not exist any more in the index. */
-
- /* fputs("PURGE:........sec entry not found\n", stderr); */
- /* dtuple_print(stderr, entry); */
goto func_exit;
- case ROW_FOUND:
- break;
- case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
- to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
- ut_error;
}
/* We should remove the index record if no later version of the row,
which cannot be purged yet, requires its existence. If some requires,
we should do nothing. */
-found:
if (row_purge_poss_sec(node, index, entry, &pcur, &mtr, true)) {
/* Remove the index record, which should have been
@@ -453,24 +439,17 @@ row_purge_remove_sec_if_poss_leaf(
pcur.btr_cur.page_cur.index = index;
- /* Set the purge node for the call to row_purge_poss_sec(). */
- pcur.btr_cur.purge_node = node;
if (index->is_spatial()) {
- pcur.btr_cur.thr = NULL;
- if (!rtr_search(entry, BTR_MODIFY_LEAF, &pcur, &mtr)) {
+ if (!rtr_search(entry, BTR_MODIFY_LEAF, &pcur, nullptr,
+ &mtr)) {
goto found;
}
- goto func_exit;
- }
-
- /* Set the query thread, so that ibuf_insert_low() will be
- able to invoke thd_get_trx(). */
- pcur.btr_cur.thr = static_cast<que_thr_t*>(que_node_get_parent(node));
-
- switch (row_search_index_entry(entry, index->has_virtual()
- ? BTR_MODIFY_LEAF : BTR_PURGE_LEAF,
- &pcur, &mtr)) {
- case ROW_FOUND:
+ } else if (btr_pcur_open(entry, PAGE_CUR_LE, BTR_MODIFY_LEAF, &pcur,
+ &mtr)
+ == DB_SUCCESS
+ && !btr_pcur_is_before_first_on_page(&pcur)
+ && btr_pcur_get_low_match(&pcur)
+ == dtuple_get_n_fields(entry)) {
found:
/* Before attempting to purge a record, check
if it is safe to do so. */
@@ -499,25 +478,18 @@ found:
if (index->is_spatial()) {
const buf_block_t* block = btr_cur_get_block(
btr_cur);
+ const page_id_t id{block->page.id()};
- if (block->page.id().page_no()
- != index->page
+ if (id.page_no() != index->page
&& page_get_n_recs(block->page.frame) < 2
- && !lock_test_prdt_page_lock(
- btr_cur->rtr_info
- && btr_cur->rtr_info->thr
- ? thr_get_trx(
- btr_cur->rtr_info->thr)
- : nullptr,
- block->page.id())) {
+ && !lock_test_prdt_page_lock(nullptr, id)) {
/* this is the last record on page,
and it has a "page" lock on it,
which mean search is still depending
on it, so do not delete */
DBUG_LOG("purge",
"skip purging last"
- " record on page "
- << block->page.id());
+ " record on page " << id);
goto func_exit;
}
}
@@ -525,25 +497,13 @@ found:
success = btr_cur_optimistic_delete(btr_cur, 0, &mtr)
!= DB_FAIL;
}
+ }
- /* (The index entry is still needed,
- or the deletion succeeded) */
- /* fall through */
- case ROW_NOT_DELETED_REF:
- /* The index entry is still needed. */
- case ROW_BUFFERED:
- /* The deletion was buffered. */
- case ROW_NOT_FOUND:
- /* The index entry does not exist, nothing to do. */
func_exit:
- mtr.commit();
+ mtr.commit();
cleanup:
- btr_pcur_close(&pcur); // FIXME: do we need these? when is btr_cur->rtr_info set?
- return(success);
- }
-
- ut_error;
- return(false);
+ btr_pcur_close(&pcur);
+ return success;
}
/***********************************************************//**
@@ -596,10 +556,7 @@ Purges a delete marking of a record.
@retval false the purge needs to be suspended because of
running out of file space */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
-bool
-row_purge_del_mark(
-/*===============*/
- purge_node_t* node) /*!< in/out: row purge node */
+bool row_purge_del_mark(purge_node_t *node)
{
if (node->index)
{
diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc
index a4d634f2d14..059aee6f140 100644
--- a/storage/innobase/row/row0quiesce.cc
+++ b/storage/innobase/row/row0quiesce.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,7 +26,6 @@ Created 2012-02-08 by Sunny Bains.
#include "row0quiesce.h"
#include "row0mysql.h"
-#include "ibuf0ibuf.h"
#include "srv0start.h"
#include "trx0purge.h"
@@ -533,18 +532,6 @@ row_quiesce_table_start(
purge_sys.stop();
}
- for (ulint count = 0;
- ibuf_merge_space(table->space_id);
- ++count) {
- if (trx_is_interrupted(trx)) {
- goto aborted;
- }
- if (!(count % 20)) {
- ib::info() << "Merging change buffer entries for "
- << table->name;
- }
- }
-
while (buf_flush_list_space(table->space)) {
if (trx_is_interrupted(trx)) {
goto aborted;
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 4a00b2a430e..a7cddee0b77 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -215,28 +215,20 @@ row_build_index_entry_low(
entry = dtuple_create(heap, entry_len);
}
- if (dict_index_is_ibuf(index)) {
- dtuple_set_n_fields_cmp(entry, entry_len);
- /* There may only be externally stored columns
- in a clustered index B-tree of a user table. */
- ut_a(!ext);
- } else {
- dtuple_set_n_fields_cmp(
- entry, dict_index_get_n_unique_in_tree(index));
- if (dict_index_is_spatial(index)) {
- /* Set the MBR field */
- if (!row_build_spatial_index_key(
- index, ext,
- dtuple_get_nth_field(entry, 0),
- dtuple_get_nth_field(
- row,
- dict_index_get_nth_field(index, i)
- ->col->ind), flag, heap)) {
- return NULL;
- }
-
- i = 1;
+ dtuple_set_n_fields_cmp(entry, dict_index_get_n_unique_in_tree(index));
+ if (index->is_spatial()) {
+ /* Set the MBR field */
+ if (!row_build_spatial_index_key(
+ index, ext,
+ dtuple_get_nth_field(entry, 0),
+ dtuple_get_nth_field(
+ row,
+ dict_index_get_nth_field(index, i)
+ ->col->ind), flag, heap)) {
+ return NULL;
}
+
+ i = 1;
}
for (; i < entry_len; i++) {
@@ -1262,8 +1254,8 @@ row_get_clust_rec(
/***************************************************************//**
Searches an index record.
-@return whether the record was found or buffered */
-enum row_search_result
+@return whether the record was found */
+bool
row_search_index_entry(
/*===================*/
const dtuple_t* entry, /*!< in: index entry */
@@ -1272,47 +1264,14 @@ row_search_index_entry(
be closed by the caller */
mtr_t* mtr) /*!< in: mtr */
{
- ulint n_fields;
- ulint low_match;
- rec_t* rec;
-
ut_ad(dtuple_check_typed(entry));
if (btr_pcur_open(entry, PAGE_CUR_LE, mode, pcur, mtr) != DB_SUCCESS) {
- return ROW_NOT_FOUND;
- }
-
- switch (btr_pcur_get_btr_cur(pcur)->flag) {
- case BTR_CUR_DELETE_REF:
- ut_ad(!(~mode & BTR_DELETE));
- return(ROW_NOT_DELETED_REF);
-
- case BTR_CUR_DEL_MARK_IBUF:
- case BTR_CUR_DELETE_IBUF:
- case BTR_CUR_INSERT_TO_IBUF:
- return(ROW_BUFFERED);
-
- case BTR_CUR_HASH:
- case BTR_CUR_HASH_FAIL:
- case BTR_CUR_BINARY:
- break;
- }
-
- low_match = btr_pcur_get_low_match(pcur);
-
- rec = btr_pcur_get_rec(pcur);
-
- n_fields = dtuple_get_n_fields(entry);
-
- if (page_rec_is_infimum(rec)) {
-
- return(ROW_NOT_FOUND);
- } else if (low_match != n_fields) {
-
- return(ROW_NOT_FOUND);
+ return false;
}
- return(ROW_FOUND);
+ return !btr_pcur_is_before_first_on_page(pcur)
+ && btr_pcur_get_low_match(pcur) == dtuple_get_n_fields(entry);
}
/*******************************************************************//**
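With the change-buffer outcomes gone, row_search_index_entry() above reduces to: position a PAGE_CUR_LE cursor and report a hit only when the cursor is not before the first record on the page and the low match covers every field of the search tuple. An ordered-map analogy of that contract (illustrative only; the real code positions a B-tree page cursor, not a std::map):

    #include <map>

    /* Find the greatest key <= the search key ("PAGE_CUR_LE"); the entry
       exists only if that key compares equal on the whole search key. */
    template <typename K, typename V>
    static bool search_le_exact(const std::map<K, V> &index, const K &key)
    {
      auto it = index.upper_bound(key);  /* first record greater than key */
      if (it == index.begin())
        return false;                    /* cursor would be before the first record */
      --it;                              /* greatest record <= key */
      return it->first == key;           /* low match covers the whole tuple */
    }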
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 9ef145236a8..fa7e129752a 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -4040,7 +4040,8 @@ row_search_idx_cond_check(
ut_ad(rec_offs_validate(rec, prebuilt->index, offsets));
if (!prebuilt->idx_cond) {
- if (!handler_rowid_filter_is_active(prebuilt->pk_filter)) {
+ if (!prebuilt->pk_filter ||
+ !handler_rowid_filter_is_active(prebuilt->pk_filter)) {
return(CHECK_POS);
}
} else {
@@ -4082,7 +4083,8 @@ row_search_idx_cond_check(
switch (result) {
case CHECK_POS:
- if (handler_rowid_filter_is_active(prebuilt->pk_filter)) {
+ if (prebuilt->pk_filter &&
+ handler_rowid_filter_is_active(prebuilt->pk_filter)) {
ut_ad(!prebuilt->index->is_primary());
if (prebuilt->clust_index_was_generated) {
ulint len;
@@ -4768,14 +4770,13 @@ wait_table_again:
}
} else if (dtuple_get_n_fields(search_tuple) > 0) {
- pcur->btr_cur.thr = thr;
pcur->old_rec = nullptr;
if (index->is_spatial()) {
if (!prebuilt->rtr_info) {
prebuilt->rtr_info = rtr_create_rtr_info(
- set_also_gap_locks, true,
- btr_pcur_get_btr_cur(pcur), index);
+ set_also_gap_locks, true, thr,
+ btr_pcur_get_btr_cur(pcur));
prebuilt->rtr_info->search_tuple = search_tuple;
prebuilt->rtr_info->search_mode = mode;
rtr_info_update_btr(btr_pcur_get_btr_cur(pcur),
@@ -4788,7 +4789,8 @@ wait_table_again:
prebuilt->rtr_info->search_mode = mode;
}
- err = rtr_search_leaf(pcur, search_tuple, mode, &mtr);
+ err = rtr_search_leaf(pcur, thr, search_tuple, mode,
+ &mtr);
} else {
err = btr_pcur_open_with_no_init(search_tuple, mode,
BTR_SEARCH_LEAF,
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 50196e78092..6b4393d4113 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -40,7 +40,6 @@ Created 2/25/1997 Heikki Tuuri
#include "row0row.h"
#include "row0upd.h"
#include "que0que.h"
-#include "ibuf0ibuf.h"
#include "log0log.h"
#include "fil0fil.h"
#include <mysql/service_thd_mdl.h>
@@ -266,7 +265,7 @@ row_undo_ins_remove_sec_low(
const bool modify_leaf = mode == BTR_MODIFY_LEAF;
pcur.btr_cur.page_cur.index = index;
- row_mtr_start(&mtr, index, !modify_leaf);
+ row_mtr_start(&mtr, index);
if (index->is_spatial()) {
mode = modify_leaf
@@ -274,8 +273,7 @@ row_undo_ins_remove_sec_low(
| BTR_RTREE_DELETE_MARK
| BTR_RTREE_UNDO_INS)
: btr_latch_mode(BTR_PURGE_TREE | BTR_RTREE_UNDO_INS);
- btr_pcur_get_btr_cur(&pcur)->thr = thr;
- if (rtr_search(entry, mode, &pcur, &mtr)) {
+ if (rtr_search(entry, mode, &pcur, thr, &mtr)) {
goto func_exit;
}
@@ -296,28 +294,17 @@ row_undo_ins_remove_sec_low(
mtr_x_lock_index(index, &mtr);
}
- switch (row_search_index_entry(entry, mode, &pcur, &mtr)) {
- case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
- to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
- ut_error;
- case ROW_NOT_FOUND:
- break;
- case ROW_FOUND:
- found:
- btr_cur_t* btr_cur = btr_pcur_get_btr_cur(&pcur);
-
+ if (row_search_index_entry(entry, mode, &pcur, &mtr)) {
+found:
if (modify_leaf) {
- err = btr_cur_optimistic_delete(btr_cur, 0, &mtr);
+ err = btr_cur_optimistic_delete(&pcur.btr_cur, 0, &mtr);
} else {
/* Passing rollback=false here, because we are
deleting a secondary index record: the distinction
only matters when deleting a record that contains
externally stored columns. */
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0,
- false, &mtr);
+ btr_cur_pessimistic_delete(&err, FALSE, &pcur.btr_cur,
+ 0, false, &mtr);
}
}
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 50e15e03cc9..63393d86502 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -33,7 +33,6 @@ Created 2/27/1997 Heikki Tuuri
#include "trx0purge.h"
#include "btr0btr.h"
#include "mach0data.h"
-#include "ibuf0ibuf.h"
#include "row0undo.h"
#include "row0vers.h"
#include "trx0trx.h"
@@ -491,7 +490,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
mtr_t mtr_vers;
const bool modify_leaf = mode == BTR_MODIFY_LEAF;
- row_mtr_start(&mtr, index, !modify_leaf);
+ row_mtr_start(&mtr, index);
pcur.btr_cur.page_cur.index = index;
btr_cur = btr_pcur_get_btr_cur(&pcur);
@@ -502,8 +501,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
| BTR_RTREE_DELETE_MARK
| BTR_RTREE_UNDO_INS)
: btr_latch_mode(BTR_PURGE_TREE | BTR_RTREE_UNDO_INS);
- btr_cur->thr = thr;
- if (UNIV_LIKELY(!rtr_search(entry, mode, &pcur, &mtr))) {
+ if (UNIV_LIKELY(!rtr_search(entry, mode, &pcur, thr, &mtr))) {
goto found;
} else {
goto func_exit;
@@ -527,9 +525,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
ut_ad(!dict_index_is_online_ddl(index));
}
- switch (UNIV_EXPECT(row_search_index_entry(entry, mode, &pcur, &mtr),
- ROW_FOUND)) {
- case ROW_NOT_FOUND:
+ if (!row_search_index_entry(entry, mode, &pcur, &mtr)) {
/* In crash recovery, the secondary index record may
be missing if the UPDATE did not have time to insert
the secondary index records before the crash. When we
@@ -540,14 +536,6 @@ row_undo_mod_del_mark_or_remove_sec_low(
before it has inserted all updated secondary index
records, then the undo will not find those records. */
goto func_exit;
- case ROW_FOUND:
- break;
- case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
- to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
- ut_error;
}
found:
@@ -685,12 +673,13 @@ row_undo_mod_del_unmark_sec_and_undo_update(
}
try_again:
- row_mtr_start(&mtr, index, mode & 8);
+ row_mtr_start(&mtr, index);
- btr_cur->thr = thr;
+ mem_heap_t* offsets_heap = nullptr;
+ rec_offs* offsets = nullptr;
if (index->is_spatial()) {
- if (!rtr_search(entry, mode, &pcur, &mtr)) {
+ if (!rtr_search(entry, mode, &pcur, thr, &mtr)) {
goto found;
}
@@ -704,17 +693,7 @@ try_again:
goto not_found;
}
- switch (row_search_index_entry(entry, mode, &pcur, &mtr)) {
- mem_heap_t* heap;
- mem_heap_t* offsets_heap;
- rec_offs* offsets;
- case ROW_BUFFERED:
- case ROW_NOT_DELETED_REF:
- /* These are invalid outcomes, because the mode passed
- to row_search_index_entry() did not include any of the
- flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
- ut_error;
- case ROW_NOT_FOUND:
+ if (!row_search_index_entry(entry, mode, &pcur, &mtr)) {
not_found:
if (btr_cur->up_match >= dict_index_get_n_unique(index)
|| btr_cur->low_match >= dict_index_get_n_unique(index)) {
@@ -726,7 +705,7 @@ not_found:
<< " at: " << rec_index_print(
btr_cur_get_rec(btr_cur), index);
err = DB_DUPLICATE_KEY;
- break;
+ goto func_exit;
}
ib::warn() << "Record in index " << index->name
@@ -740,8 +719,6 @@ not_found:
delete-unmark. */
big_rec_t* big_rec;
rec_t* insert_rec;
- offsets = NULL;
- offsets_heap = NULL;
err = btr_cur_optimistic_insert(
flags, btr_cur, &offsets, &offsets_heap,
@@ -770,16 +747,13 @@ not_found:
if (offsets_heap) {
mem_heap_free(offsets_heap);
}
-
- break;
- case ROW_FOUND:
+ } else {
found:
btr_rec_set_deleted<false>(btr_cur_get_block(btr_cur),
btr_cur_get_rec(btr_cur), &mtr);
- heap = mem_heap_create(
+ mem_heap_t* heap = mem_heap_create(
sizeof(upd_t)
+ dtuple_get_n_fields(entry) * sizeof(upd_field_t));
- offsets_heap = NULL;
offsets = rec_get_offsets(
btr_cur_get_rec(btr_cur),
index, nullptr, index->n_core_fields, ULINT_UNDEFINED,
@@ -818,6 +792,7 @@ found:
mem_heap_free(offsets_heap);
}
+func_exit:
btr_pcur_close(&pcur);
mtr_commit(&mtr);
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index fe88fce58a2..15a0ebb277c 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1838,9 +1838,7 @@ row_upd_sec_index_entry(
dict_index_t* index;
dberr_t err = DB_SUCCESS;
trx_t* trx = thr_get_trx(thr);
- btr_latch_mode mode;
ulint flags;
- enum row_search_result search_result;
ut_ad(trx->id != 0);
@@ -1868,7 +1866,6 @@ row_upd_sec_index_entry(
"before_row_upd_sec_index_entry");
mtr.start();
- mode = BTR_MODIFY_LEAF;
switch (index->table->space_id) {
case SRV_TMP_SPACE_ID:
@@ -1878,24 +1875,17 @@ row_upd_sec_index_entry(
default:
index->set_modified(mtr);
/* fall through */
- case IBUF_SPACE_ID:
+ case 0:
flags = index->table->no_rollback() ? BTR_NO_ROLLBACK : 0;
- /* We can only buffer delete-mark operations if there
- are no foreign key constraints referring to the index. */
- if (!referenced) {
- mode = BTR_DELETE_MARK_LEAF;
- }
- break;
}
- /* Set the query thread, so that ibuf_insert_low() will be
- able to invoke thd_get_trx(). */
- pcur.btr_cur.thr = thr;
pcur.btr_cur.page_cur.index = index;
+ const rec_t *rec;
if (index->is_spatial()) {
- mode = btr_latch_mode(BTR_MODIFY_LEAF | BTR_RTREE_DELETE_MARK);
- if (UNIV_LIKELY(!rtr_search(entry, mode, &pcur, &mtr))) {
+ constexpr btr_latch_mode mode = btr_latch_mode(
+ BTR_MODIFY_LEAF | BTR_RTREE_DELETE_MARK);
+ if (UNIV_LIKELY(!rtr_search(entry, mode, &pcur, thr, &mtr))) {
goto found;
}
@@ -1905,20 +1895,8 @@ row_upd_sec_index_entry(
}
goto not_found;
- }
-
- search_result = row_search_index_entry(entry, mode, &pcur, &mtr);
-
- switch (search_result) {
- const rec_t* rec;
- case ROW_NOT_DELETED_REF: /* should only occur for BTR_DELETE */
- ut_error;
- break;
- case ROW_BUFFERED:
- /* Entry was delete marked already. */
- break;
-
- case ROW_NOT_FOUND:
+ } else if (!row_search_index_entry(entry, BTR_MODIFY_LEAF,
+ &pcur, &mtr)) {
not_found:
rec = btr_pcur_get_rec(&pcur);
ib::error()
@@ -1932,8 +1910,7 @@ not_found:
ut_ad(btr_validate_index(index, 0) == DB_SUCCESS);
ut_ad(0);
#endif /* UNIV_DEBUG */
- break;
- case ROW_FOUND:
+ } else {
found:
ut_ad(err == DB_SUCCESS);
rec = btr_pcur_get_rec(&pcur);
@@ -1948,7 +1925,7 @@ found:
btr_pcur_get_block(&pcur),
btr_pcur_get_rec(&pcur), index, thr, &mtr);
if (err != DB_SUCCESS) {
- break;
+ goto close;
}
btr_rec_set_deleted<true>(btr_pcur_get_block(&pcur),
diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc
index d0f96ece141..987d3d185d9 100644
--- a/storage/innobase/srv/srv0mon.cc
+++ b/storage/innobase/srv/srv0mon.cc
@@ -2,7 +2,7 @@
Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,7 +27,6 @@ Created 12/9/2009 Jimmy Yang
#include "buf0buf.h"
#include "dict0mem.h"
-#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "mach0data.h"
#include "os0file.h"
@@ -527,23 +526,10 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_BUF_PAGE_READ("index_non_leaf","Index Non-leaf",
INDEX_NON_LEAF),
- MONITOR_BUF_PAGE_READ("index_ibuf_leaf", "Insert Buffer Index Leaf",
- INDEX_IBUF_LEAF),
-
- MONITOR_BUF_PAGE_READ("index_ibuf_non_leaf",
- "Insert Buffer Index Non-Leaf",
- INDEX_IBUF_NON_LEAF),
-
MONITOR_BUF_PAGE_READ("undo_log", "Undo Log", UNDO_LOG),
MONITOR_BUF_PAGE_READ("index_inode", "Index Inode", INODE),
- MONITOR_BUF_PAGE_READ("ibuf_free_list", "Insert Buffer Free List",
- IBUF_FREELIST),
-
- MONITOR_BUF_PAGE_READ("ibuf_bitmap", "Insert Buffer Bitmap",
- IBUF_BITMAP),
-
MONITOR_BUF_PAGE_READ("system_page", "System", SYSTEM),
MONITOR_BUF_PAGE_READ("trx_system", "Transaction System", TRX_SYSTEM),
@@ -566,23 +552,10 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_BUF_PAGE_WRITTEN("index_non_leaf","Index Non-leaf",
INDEX_NON_LEAF),
- MONITOR_BUF_PAGE_WRITTEN("index_ibuf_leaf", "Insert Buffer Index Leaf",
- INDEX_IBUF_LEAF),
-
- MONITOR_BUF_PAGE_WRITTEN("index_ibuf_non_leaf",
- "Insert Buffer Index Non-Leaf",
- INDEX_IBUF_NON_LEAF),
-
MONITOR_BUF_PAGE_WRITTEN("undo_log", "Undo Log", UNDO_LOG),
MONITOR_BUF_PAGE_WRITTEN("index_inode", "Index Inode", INODE),
- MONITOR_BUF_PAGE_WRITTEN("ibuf_free_list", "Insert Buffer Free List",
- IBUF_FREELIST),
-
- MONITOR_BUF_PAGE_WRITTEN("ibuf_bitmap", "Insert Buffer Bitmap",
- IBUF_BITMAP),
-
MONITOR_BUF_PAGE_WRITTEN("system_page", "System", SYSTEM),
MONITOR_BUF_PAGE_WRITTEN("trx_system", "Transaction System",
@@ -948,57 +921,6 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
MONITOR_DEFAULT_START, MONITOR_OVLD_N_FILE_OPENED},
- /* ========== Counters for Change Buffer ========== */
- {"module_ibuf_system", "change_buffer", "InnoDB Change Buffer",
- MONITOR_MODULE,
- MONITOR_DEFAULT_START, MONITOR_MODULE_IBUF_SYSTEM},
-
- {"ibuf_merges_insert", "change_buffer",
- "Number of inserted records merged by change buffering",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_INSERT},
-
- {"ibuf_merges_delete_mark", "change_buffer",
- "Number of deleted records merged by change buffering",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DELETE},
-
- {"ibuf_merges_delete", "change_buffer",
- "Number of purge records merged by change buffering",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_PURGE},
-
- {"ibuf_merges_discard_insert", "change_buffer",
- "Number of insert merged operations discarded",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DISCARD_INSERT},
-
- {"ibuf_merges_discard_delete_mark", "change_buffer",
- "Number of deleted merged operations discarded",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DISCARD_DELETE},
-
- {"ibuf_merges_discard_delete", "change_buffer",
- "Number of purge merged operations discarded",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGE_DISCARD_PURGE},
-
- {"ibuf_merges", "change_buffer", "Number of change buffer merges",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_MERGES},
-
- {"ibuf_size", "change_buffer", "Change buffer size in pages",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_IBUF_SIZE},
-
/* ========== Counters for server operations ========== */
{"module_innodb", "innodb",
"Counter for general InnoDB server wide operations and properties",
@@ -1531,38 +1453,6 @@ srv_mon_process_existing_counter(
value = fil_system.n_open;
break;
- case MONITOR_OVLD_IBUF_MERGE_INSERT:
- value = ibuf.n_merged_ops[IBUF_OP_INSERT];
- break;
-
- case MONITOR_OVLD_IBUF_MERGE_DELETE:
- value = ibuf.n_merged_ops[IBUF_OP_DELETE_MARK];
- break;
-
- case MONITOR_OVLD_IBUF_MERGE_PURGE:
- value = ibuf.n_merged_ops[IBUF_OP_DELETE];
- break;
-
- case MONITOR_OVLD_IBUF_MERGE_DISCARD_INSERT:
- value = ibuf.n_discarded_ops[IBUF_OP_INSERT];
- break;
-
- case MONITOR_OVLD_IBUF_MERGE_DISCARD_DELETE:
- value = ibuf.n_discarded_ops[IBUF_OP_DELETE_MARK];
- break;
-
- case MONITOR_OVLD_IBUF_MERGE_DISCARD_PURGE:
- value = ibuf.n_discarded_ops[IBUF_OP_DELETE];
- break;
-
- case MONITOR_OVLD_IBUF_MERGES:
- value = ibuf.n_merges;
- break;
-
- case MONITOR_OVLD_IBUF_SIZE:
- value = ibuf.size;
- break;
-
case MONITOR_OVLD_SERVER_ACTIVITY:
value = srv_get_activity_count();
break;
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index c5ccb7ee43b..d8babd40468 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -48,7 +48,6 @@ Created 10/8/1995 Heikki Tuuri
#include "buf0lru.h"
#include "dict0boot.h"
#include "dict0load.h"
-#include "ibuf0ibuf.h"
#include "lock0lock.h"
#include "log0recv.h"
#include "mem0mem.h"
@@ -126,9 +125,9 @@ my_bool srv_read_only_mode;
/** store each table created by a user in its own file; data
dictionary tables are in the system tablespace 0 */
my_bool srv_file_per_table;
-/** Set if InnoDB operates in read-only mode or innodb-force-recovery
-is greater than SRV_FORCE_NO_TRX_UNDO. */
-my_bool high_level_read_only;
+/** Set if innodb_read_only is set or innodb_force_recovery
+is SRV_FORCE_NO_UNDO_LOG_SCAN or greater. */
+bool high_level_read_only;
/** Sort buffer size in index creation */
ulong srv_sort_buf_size;
@@ -219,13 +218,6 @@ in the buffer cache and accessed sequentially for InnoDB to trigger a
readahead request. */
ulong srv_read_ahead_threshold;
-/** innodb_change_buffer_max_size; maximum on-disk size of change
-buffer in terms of percentage of the buffer pool. */
-uint srv_change_buffer_max_size;
-
-ulong srv_file_flush_method;
-
-
/** copy of innodb_open_files; @see innodb_init_params() */
ulint srv_max_n_open_files;
@@ -282,7 +274,7 @@ my_bool srv_print_all_deadlocks;
INFORMATION_SCHEMA.innodb_cmp_per_index */
my_bool srv_cmp_per_index_enabled;
-/** innodb_fast_shutdown=1 skips purge and change buffer merge.
+/** innodb_fast_shutdown=1 skips the purge of transaction history.
innodb_fast_shutdown=2 effectively crashes the server (no log checkpoint).
innodb_fast_shutdown=3 is a clean shutdown that skips the rollback
of active transactions (to be done on restart). */
@@ -384,8 +376,6 @@ FILE* srv_misc_tmpfile;
ulint srv_main_active_loops;
/** Iterations of the loop bounded by the 'srv_idle' label. */
ulint srv_main_idle_loops;
-/** Iterations of the loop bounded by the 'srv_shutdown' label. */
-static ulint srv_main_shutdown_loops;
/** Log writes involving flush. */
ulint srv_log_writes_and_flush;
@@ -569,10 +559,9 @@ srv_print_master_thread_info(
FILE *file) /* in: output stream */
{
fprintf(file, "srv_master_thread loops: " ULINTPF " srv_active, "
- ULINTPF " srv_shutdown, " ULINTPF " srv_idle\n"
+ ULINTPF " srv_idle\n"
"srv_master_thread log flush and writes: " ULINTPF "\n",
srv_main_active_loops,
- srv_main_shutdown_loops,
srv_main_idle_loops,
srv_log_writes_and_flush);
}
@@ -791,8 +780,6 @@ srv_printf_innodb_monitor(
"--------\n", file);
os_aio_print(file);
- ibuf_print(file);
-
#ifdef BTR_CUR_HASH_ADAPT
if (btr_search_enabled) {
fputs("-------------------\n"
@@ -1296,31 +1283,6 @@ static void srv_sync_log_buffer_in_background()
}
}
-/** Report progress during shutdown.
-@param last time of last output
-@param n_read number of page reads initiated for change buffer merge */
-static void srv_shutdown_print(time_t &last, ulint n_read)
-{
- time_t now= time(nullptr);
- if (now - last >= 15)
- {
- last= now;
-
- const ulint ibuf_size= ibuf.size;
- sql_print_information("Completing change buffer merge;"
- " %zu page reads initiated;"
- " %zu change buffer pages remain",
- n_read, ibuf_size);
-#if defined HAVE_SYSTEMD && !defined EMBEDDED_LIBRARY
- service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL,
- "Completing change buffer merge;"
- " %zu page reads initiated;"
- " %zu change buffer pages remain",
- n_read, ibuf_size);
-#endif
- }
-}
-
/** Perform periodic tasks whenever the server is active.
@param counter_time microsecond_interval_timer() */
static void srv_master_do_active_tasks(ulonglong counter_time)
@@ -1358,32 +1320,6 @@ static void srv_master_do_idle_tasks(ulonglong counter_time)
MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time);
}
-/**
-Complete the shutdown tasks such as background DROP TABLE,
-and optionally change buffer merge (on innodb_fast_shutdown=0). */
-void srv_shutdown(bool ibuf_merge)
-{
- ulint n_read = 0;
- time_t now = time(NULL);
-
- do {
- ut_ad(!srv_read_only_mode);
- ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP);
- ++srv_main_shutdown_loops;
-
- if (ibuf_merge) {
- srv_main_thread_op_info = "doing insert buffer merge";
- /* Disallow the use of change buffer to
- avoid a race condition with
- ibuf_read_merge_pages() */
- ibuf_max_size_update(0);
- log_free_check();
- n_read = ibuf_contract();
- srv_shutdown_print(now, n_read);
- }
- } while (n_read);
-}
-
/** The periodic master task controlling the server. */
void srv_master_callback(void*)
{
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 2ed5ac57a0c..5266450ce10 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -3,7 +3,7 @@
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2022, MariaDB Corporation.
+Copyright (c) 2013, 2023, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -69,7 +69,6 @@ Created 2/16/1996 Heikki Tuuri
#include "btr0btr.h"
#include "btr0cur.h"
#include "rem0rec.h"
-#include "ibuf0ibuf.h"
#include "srv0start.h"
#include "srv0srv.h"
#include "btr0defragment.h"
@@ -95,6 +94,7 @@ Created 2/16/1996 Heikki Tuuri
#include "row0row.h"
#include "row0mysql.h"
#include "btr0pcur.h"
+#include "ibuf0ibuf.h"
#include "zlib.h"
#include "log.h"
@@ -1031,7 +1031,7 @@ srv_init_abort_low(
/** Prepare to delete the redo log file. Flush the dirty pages from all the
buffer pools. Flush the redo log buffer to the redo log file.
@return lsn up to which data pages have been flushed. */
-static lsn_t srv_prepare_to_delete_redo_log_file()
+ATTRIBUTE_COLD static lsn_t srv_prepare_to_delete_redo_log_file()
{
DBUG_ENTER("srv_prepare_to_delete_redo_log_file");
@@ -1098,6 +1098,67 @@ same_size:
DBUG_RETURN(flushed_lsn);
}
+/** Upgrade the redo log to the latest format, or change its size
+or encryption, before starting to write any log records. */
+ATTRIBUTE_COLD static dberr_t srv_log_rebuild()
+{
+ /* Prepare to delete the old redo log file */
+ const lsn_t lsn{srv_prepare_to_delete_redo_log_file()};
+
+ DBUG_EXECUTE_IF("innodb_log_abort_1", return DB_ERROR;);
+ /* Prohibit redo log writes from any other threads until creating a
+ log checkpoint at the end of create_log_file(). */
+ ut_d(recv_no_log_write= true);
+ DBUG_ASSERT(!buf_pool.any_io_pending());
+
+ /* Close the redo log file, so that we can replace it */
+ log_sys.close_file();
+
+ DBUG_EXECUTE_IF("innodb_log_abort_5", return DB_ERROR;);
+
+ dberr_t err= create_log_file(false, lsn);
+
+ if (err == DB_SUCCESS && log_sys.resize_rename())
+ err = DB_ERROR;
+
+ return err;
+}
+
+/** Rebuild the redo log if needed. */
+static dberr_t srv_log_rebuild_if_needed()
+{
+ if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO)
+ /* Completely ignore the redo log. */
+ return DB_SUCCESS;
+ if (srv_read_only_mode)
+ /* Leave the redo log alone. */
+ return DB_SUCCESS;
+
+ if (log_sys.file_size == srv_log_file_size &&
+ log_sys.format ==
+ (srv_encrypt_log ? log_t::FORMAT_ENC_10_8 : log_t::FORMAT_10_8))
+ {
+ /* No need to add or remove encryption, upgrade, or resize. */
+ delete_log_files();
+ return DB_SUCCESS;
+ }
+
+ return srv_log_rebuild();
+}
+
+ATTRIBUTE_COLD static dberr_t ibuf_log_rebuild_if_needed()
+{
+ mysql_mutex_lock(&recv_sys.mutex);
+ recv_sys.apply(true);
+ mysql_mutex_unlock(&recv_sys.mutex);
+
+ if (recv_sys.is_corrupt_log() || recv_sys.is_corrupt_fs())
+ return DB_CORRUPTION;
+
+ recv_sys.debug_free();
+ return srv_log_rebuild_if_needed();
+}
+
static tpool::task_group rollback_all_recovered_group(1);
static tpool::task rollback_all_recovered_task(trx_rollback_all_recovered,
nullptr,
@@ -1136,10 +1197,6 @@ dberr_t srv_start(bool create_new_db)
ib::info() << "!!!!!!!! UNIV_DEBUG switched on !!!!!!!!!";
#endif
-#ifdef UNIV_IBUF_DEBUG
- ib::info() << "!!!!!!!! UNIV_IBUF_DEBUG switched on !!!!!!!!!";
-#endif
-
ib::info() << "Compressed tables use zlib " ZLIB_VERSION
#ifdef UNIV_ZIP_DEBUG
" with validation"
@@ -1235,11 +1292,6 @@ dberr_t srv_start(bool create_new_db)
return(srv_init_abort(err));
}
- if (srv_read_only_mode) {
- ib::info() << "Disabling background log and ibuf IO write"
- << " threads.";
- }
-
if (os_aio_init()) {
ib::error() << "Cannot initialize AIO sub-system";
@@ -1385,31 +1437,41 @@ dberr_t srv_start(bool create_new_db)
if (create_new_db) {
ut_ad(!srv_read_only_mode);
- mtr_start(&mtr);
+ mtr.start();
ut_ad(fil_system.sys_space->id == 0);
compile_time_assert(TRX_SYS_SPACE == 0);
- compile_time_assert(IBUF_SPACE_ID == 0);
- ut_a(fsp_header_init(fil_system.sys_space,
- uint32_t(sum_of_new_sizes), &mtr)
- == DB_SUCCESS);
-
- ulint ibuf_root = btr_create(
- DICT_CLUSTERED | DICT_IBUF, fil_system.sys_space,
- DICT_IBUF_ID_MIN, nullptr, &mtr, &err);
-
- mtr_commit(&mtr);
-
- if (ibuf_root == FIL_NULL) {
- return srv_init_abort(err);
+ err = fsp_header_init(fil_system.sys_space,
+ uint32_t(sum_of_new_sizes), &mtr);
+ /* Allocate dummy change buffer pages for backward
+ compatibility and to prevent a downgrade. */
+ if (err != DB_SUCCESS) {
+ } else if (buf_block_t *b =
+ fseg_create(fil_system.sys_space, PAGE_DATA, &mtr,
+ &err)) {
+ ut_ad(b->page.id()
+ == page_id_t(0, FSP_IBUF_HEADER_PAGE_NO));
+ b = fseg_alloc_free_page_general(
+ b->page.frame + PAGE_DATA,
+ FSP_IBUF_TREE_ROOT_PAGE_NO, FSP_UP, false,
+ &mtr, &mtr, &err);
+ if (b) {
+ ut_ad(b->page.id() == page_id_t
+ (0, FSP_IBUF_TREE_ROOT_PAGE_NO));
+ mtr.set_modified(*b);
+ fsp_init_file_page(fil_system.sys_space, b,
+ &mtr);
+ } else {
+ ut_ad(err != DB_SUCCESS);
+ }
}
-
- ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO);
-
/* To maintain backward compatibility we create only
the first rollback segment before the double write buffer.
All the remaining rollback segments will be created later,
after the double write buffer has been created. */
- err = trx_sys_create_sys_pages(&mtr);
+ if (err == DB_SUCCESS) {
+ err = trx_sys_create_sys_pages(&mtr);
+ }
+ mtr.commit();
if (err != DB_SUCCESS) {
return(srv_init_abort(err));
@@ -1443,38 +1505,58 @@ dberr_t srv_start(bool create_new_db)
recv_sys.dblwr.pages.clear();
- if (err != DB_SUCCESS) {
- return(srv_init_abort(err));
- }
+ bool must_upgrade_ibuf = false;
switch (srv_operation) {
case SRV_OPERATION_NORMAL:
case SRV_OPERATION_RESTORE_EXPORT:
- /* Initialize the change buffer. */
- err = dict_boot();
if (err != DB_SUCCESS) {
- return(srv_init_abort(err));
+ break;
+ }
+
+ err = ibuf_upgrade_needed();
+
+ if (UNIV_UNLIKELY(err == DB_FAIL)) {
+ must_upgrade_ibuf = true;
+ err = ibuf_log_rebuild_if_needed();
}
+
+ if (err != DB_SUCCESS) {
+ break;
+ }
+
+ err = dict_boot();
/* fall through */
case SRV_OPERATION_RESTORE:
- /* This must precede recv_sys.apply(true). */
+ if (err != DB_SUCCESS) {
+ break;
+ }
+
srv_undo_tablespaces_active
= trx_rseg_get_n_undo_tablespaces();
if (srv_operation != SRV_OPERATION_RESTORE) {
dict_sys.load_sys_tables();
}
- err = trx_lists_init_at_db_start();
- if (err != DB_SUCCESS) {
- return srv_init_abort(err);
+
+ if (UNIV_UNLIKELY(must_upgrade_ibuf)) {
+ dict_load_tablespaces();
+ err = ibuf_upgrade();
+ if (err != DB_SUCCESS) {
+ break;
+ }
}
+
+ err = trx_lists_init_at_db_start();
break;
- case SRV_OPERATION_RESTORE_DELTA:
- case SRV_OPERATION_BACKUP:
- case SRV_OPERATION_BACKUP_NO_DEFER:
+ default:
ut_ad("wrong mariabackup mode" == 0);
}
+ if (err != DB_SUCCESS) {
+ return srv_init_abort(err);
+ }
+
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
/* Apply the hashed log records to the
respective file pages, for the last batch of
@@ -1592,47 +1674,10 @@ dberr_t srv_start(bool create_new_db)
/* Upgrade or resize or rebuild the redo logs before
generating any dirty pages, so that the old redo log
file will not be written to. */
+ err = srv_log_rebuild_if_needed();
- if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) {
- /* Completely ignore the redo log. */
- } else if (srv_read_only_mode) {
- /* Leave the redo log alone. */
- } else if (log_sys.file_size == srv_log_file_size
- && log_sys.format
- == (srv_encrypt_log
- ? log_t::FORMAT_ENC_10_8
- : log_t::FORMAT_10_8)) {
- /* No need to add or remove encryption,
- upgrade, or resize. */
- delete_log_files();
- } else {
- /* Prepare to delete the old redo log file */
- const lsn_t lsn{srv_prepare_to_delete_redo_log_file()};
-
- DBUG_EXECUTE_IF("innodb_log_abort_1",
- return(srv_init_abort(DB_ERROR)););
- /* Prohibit redo log writes from any other
- threads until creating a log checkpoint at the
- end of create_log_file(). */
- ut_d(recv_no_log_write = true);
- DBUG_ASSERT(!buf_pool.any_io_pending());
-
- /* Close the redo log file, so that we can replace it */
- log_sys.close_file();
-
- DBUG_EXECUTE_IF("innodb_log_abort_5",
- return(srv_init_abort(DB_ERROR)););
- DBUG_PRINT("ib_log", ("After innodb_log_abort_5"));
-
- err = create_log_file(false, lsn);
-
- if (err == DB_SUCCESS && log_sys.resize_rename()) {
- err = DB_ERROR;
- }
-
- if (err != DB_SUCCESS) {
- return(srv_init_abort(err));
- }
+ if (err != DB_SUCCESS) {
+ return(srv_init_abort(err));
}
recv_sys.debug_free();
@@ -1686,8 +1731,7 @@ dberr_t srv_start(bool create_new_db)
/* Bitmap page types will be reset in
buf_dblwr_check_block() without redo logging. */
block = buf_page_get(
- page_id_t(IBUF_SPACE_ID,
- FSP_IBUF_HEADER_PAGE_NO),
+ page_id_t(0, FSP_IBUF_HEADER_PAGE_NO),
0, RW_X_LATCH, &mtr);
if (UNIV_UNLIKELY(!block)) {
corrupted_old_page:
@@ -1745,21 +1789,7 @@ dberr_t srv_start(bool create_new_db)
}
if (srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) {
- /* The following call is necessary for the insert
- buffer to work with multiple tablespaces. We must
- know the mapping between space id's and .ibd file
- names.
-
- In a crash recovery, we check that the info in data
- dictionary is consistent with what we already know
- about space id's from the calls to fil_ibd_load().
-
- In a normal startup, we create the space objects for
- every table in the InnoDB data dictionary that has
- an .ibd file.
-
- We also determine the maximum tablespace id used. */
- dict_check_tablespaces_and_store_max_id();
+ dict_load_tablespaces();
}
if (srv_force_recovery < SRV_FORCE_NO_TRX_UNDO
@@ -1841,13 +1871,6 @@ skip_monitors:
trx_sys.get_max_trx_id());
}
- if (srv_force_recovery == 0) {
- /* In the change buffer we may have even bigger tablespace
- id's, because we may have dropped those tablespaces, but
- the buffered records have not been cleaned yet. */
- ibuf_update_max_tablespace_id();
- }
-
if (!srv_read_only_mode) {
if (create_new_db) {
srv_buffer_pool_load_at_startup = FALSE;
@@ -1902,10 +1925,6 @@ void innodb_preshutdown()
return;
if (!srv_fast_shutdown && srv_operation == SRV_OPERATION_NORMAL)
{
- /* Because a slow shutdown must empty the change buffer, we had
- better prevent any further changes from being buffered. */
- innodb_change_buffering= 0;
-
if (trx_sys.is_initialised())
while (trx_sys.any_active_transactions())
std::this_thread::sleep_for(std::chrono::milliseconds(1));
@@ -1971,8 +1990,6 @@ void innodb_shutdown()
|| srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
ut_ad(lock_sys.is_initialised() || !srv_was_started);
ut_ad(log_sys.is_initialised() || !srv_was_started);
- ut_ad(ibuf.index || !innodb_change_buffering || !srv_was_started
- || srv_force_recovery >= SRV_FORCE_NO_DDL_UNDO);
dict_stats_deinit();
@@ -1993,7 +2010,6 @@ void innodb_shutdown()
btr_search_disable();
}
#endif /* BTR_CUR_HASH_ADAPT */
- ibuf_close();
log_sys.close();
purge_sys.close();
trx_sys.close();
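Editor's note: the new srv_log_rebuild_if_needed() in the srv0start.cc hunk above condenses the old inline branching into three early-outs before the expensive rebuild path: ignore the log under SRV_FORCE_NO_LOG_REDO, leave it untouched in read-only mode, and keep it when size and format already match. The following minimal standalone sketch restates only that decision order; the numeric constants and stubbed helpers are invented for illustration and are not part of the patch or of the InnoDB API.

// Illustrative sketch only: mirrors the decision order of
// srv_log_rebuild_if_needed() from the hunk above, with stubbed helpers.
#include <cstdint>
#include <cstdio>

enum dberr_t { DB_SUCCESS, DB_ERROR };
enum { SRV_FORCE_NO_LOG_REDO = 6 };            // value assumed for the sketch

struct log_sketch {
  uint64_t file_size;
  int      format;    // stands in for log_t::FORMAT_10_8 / FORMAT_ENC_10_8
};

static dberr_t rebuild_log()      { std::puts("rebuilding redo log"); return DB_SUCCESS; }
static void    delete_log_files() { std::puts("removing leftover log files"); }

// Decide whether the redo log must be rebuilt before any new writes.
static dberr_t log_rebuild_if_needed(const log_sketch &log,
                                     unsigned force_recovery,
                                     bool read_only,
                                     uint64_t wanted_size,
                                     int wanted_format)
{
  if (force_recovery == SRV_FORCE_NO_LOG_REDO)
    return DB_SUCCESS;                  // redo log is ignored entirely
  if (read_only)
    return DB_SUCCESS;                  // never touch the log in read-only mode
  if (log.file_size == wanted_size && log.format == wanted_format)
  {
    delete_log_files();                 // size and format already match
    return DB_SUCCESS;
  }
  return rebuild_log();                 // resize, upgrade, or re-encrypt
}

int main()
{
  log_sketch log{96 << 20, 108};        // both numbers are placeholders
  return log_rebuild_if_needed(log, 0, false, 96 << 20, 108) == DB_SUCCESS ? 0 : 1;
}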
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 867126adc0e..841b014019b 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -262,7 +262,6 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
trx_ulogf_t* undo_header = undo_page->page.frame
+ undo->hdr_offset;
- ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1);
ut_ad(rseg->needs_purge > trx->id);
if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
@@ -352,8 +351,6 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
mtr->write<8,mtr_t::MAYBE_NOP>(*undo_page,
undo_header + TRX_UNDO_TRX_NO,
trx->rw_trx_hash_element->no);
- mtr->write<2,mtr_t::MAYBE_NOP>(*undo_page, undo_header
- + TRX_UNDO_NEEDS_PURGE, 1U);
if (rseg->last_page_no == FIL_NULL) {
rseg->last_page_no = undo->hdr_page_no;
@@ -913,12 +910,8 @@ static void trx_purge_rseg_get_next_history_log(
if (const buf_block_t* undo_page=
buf_page_get_gen(page_id_t(purge_sys.rseg->space->id, prev_log_addr.page),
0, RW_S_LATCH, nullptr, BUF_GET_POSSIBLY_FREED, &mtr))
- {
- const byte *log_hdr= undo_page->page.frame + prev_log_addr.boffset;
-
- trx_no= mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);
- ut_ad(mach_read_from_2(log_hdr + TRX_UNDO_NEEDS_PURGE) <= 1);
- }
+ trx_no= mach_read_from_8(undo_page->page.frame + prev_log_addr.boffset +
+ TRX_UNDO_TRX_NO);
mtr.commit();
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index 3fada7d34aa..1dc3c18fc09 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -446,7 +446,7 @@ static dberr_t trx_rseg_mem_restore(trx_rseg_t *rseg, mtr_t *mtr)
return DB_TABLESPACE_NOT_FOUND;
dberr_t err;
const buf_block_t *rseg_hdr=
- buf_page_get_gen(rseg->page_id(), 0, RW_S_LATCH, nullptr, BUF_GET, mtr,
+ buf_page_get_gen(rseg->page_id(), 0, RW_X_LATCH, nullptr, BUF_GET, mtr,
&err);
if (!rseg_hdr)
return err;
@@ -522,8 +522,6 @@ static dberr_t trx_rseg_mem_restore(trx_rseg_t *rseg, mtr_t *mtr)
rseg->needs_purge= id;
rseg->set_last_commit(node_addr.boffset, id);
- ut_ad(mach_read_from_2(block->page.frame + node_addr.boffset +
- TRX_UNDO_NEEDS_PURGE) <= 1);
if (rseg->last_page_no != FIL_NULL)
/* There is no need to cover this operation by the purge
@@ -574,7 +572,7 @@ dberr_t trx_rseg_array_init()
for (ulint rseg_id = 0; rseg_id < TRX_SYS_N_RSEGS; rseg_id++) {
mtr.start();
- if (const buf_block_t* sys = trx_sysf_get(&mtr, false)) {
+ if (const buf_block_t* sys = trx_sysf_get(&mtr, true)) {
if (rseg_id == 0) {
/* In case this is an upgrade from
before MariaDB 10.3.5, fetch the base
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 374a9d724bc..ab8c0b34e7d 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2022, MariaDB Corporation.
+Copyright (c) 2017, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -105,7 +105,6 @@ trx_sysf_get_n_rseg_slots()
/** Initialize the transaction system when creating the database. */
dberr_t trx_sys_create_sys_pages(mtr_t *mtr)
{
- mtr->start();
mtr->x_lock_space(fil_system.sys_space);
static_assert(TRX_SYS_SPACE == 0, "compatibility");
@@ -114,11 +113,7 @@ dberr_t trx_sys_create_sys_pages(mtr_t *mtr)
buf_block_t *block= fseg_create(fil_system.sys_space,
TRX_SYS + TRX_SYS_FSEG_HEADER, mtr, &err);
if (UNIV_UNLIKELY(!block))
- {
- error:
- mtr->commit();
return err;
- }
ut_a(block->page.id() == page_id_t(0, TRX_SYS_PAGE_NO));
mtr->write<2>(*block, FIL_PAGE_TYPE + block->page.frame,
@@ -138,9 +133,8 @@ dberr_t trx_sys_create_sys_pages(mtr_t *mtr)
buf_block_t *r= trx_rseg_header_create(fil_system.sys_space, 0, 0,
mtr, &err);
if (UNIV_UNLIKELY(!r))
- goto error;
+ return err;
ut_a(r->page.id() == page_id_t(0, FSP_FIRST_RSEG_PAGE_NO));
- mtr->commit();
return trx_lists_init_at_db_start();
}
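Editor's note: the trx0sys.cc hunk above removes the mtr->start()/mtr->commit() pair from trx_sys_create_sys_pages(), so the caller in srv_start() now owns the whole mini-transaction and can chain several allocations inside it with a single commit. The sketch below shows only that caller-owns-the-transaction shape; the types and helpers are hypothetical stand-ins, not the real InnoDB mtr_t API.

// Sketch of the "caller owns the mini-transaction" refactor: the helper no
// longer starts or commits; it only reports errors and lets the caller decide.
#include <cstdio>

enum dberr_t { DB_SUCCESS, DB_ERROR };

struct mtr_sketch {                        // hypothetical stand-in for mtr_t
  void start()  { std::puts("mtr start"); }
  void commit() { std::puts("mtr commit"); }
};

static dberr_t init_fsp_header(mtr_sketch &)  { return DB_SUCCESS; }
static dberr_t create_sys_pages(mtr_sketch &) { return DB_SUCCESS; }

static dberr_t bootstrap()
{
  mtr_sketch mtr;
  mtr.start();                            // one mini-transaction for everything
  dberr_t err = init_fsp_header(mtr);
  if (err == DB_SUCCESS)
    err = create_sys_pages(mtr);          // no nested start/commit inside
  mtr.commit();                           // single commit on all paths
  return err;
}

int main() { return bootstrap() == DB_SUCCESS ? 0 : 1; }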
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index bc41a535dbe..e88f7824ba6 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2022, MariaDB Corporation.
+Copyright (c) 2015, 2023, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -412,12 +412,12 @@ void trx_t::free()
#endif
read_view.mem_noaccess();
MEM_NOACCESS(&lock, sizeof lock);
- MEM_NOACCESS(&op_info, sizeof op_info);
- MEM_NOACCESS(&isolation_level, sizeof isolation_level);
- MEM_NOACCESS(&check_foreigns, sizeof check_foreigns);
+ MEM_NOACCESS(&op_info, sizeof op_info +
+ sizeof(unsigned) /* isolation_level,
+ check_foreigns, check_unique_secondary,
+ bulk_insert */);
MEM_NOACCESS(&is_registered, sizeof is_registered);
MEM_NOACCESS(&active_commit_ordered, sizeof active_commit_ordered);
- MEM_NOACCESS(&check_unique_secondary, sizeof check_unique_secondary);
MEM_NOACCESS(&flush_log_later, sizeof flush_log_later);
MEM_NOACCESS(&must_flush_log_later, sizeof must_flush_log_later);
MEM_NOACCESS(&duplicates, sizeof duplicates);
@@ -1155,7 +1155,7 @@ static void trx_flush_log_if_needed_low(lsn_t lsn, const trx_t *trx)
callback= &cb;
}
- log_write_up_to(lsn, srv_file_flush_method != SRV_NOSYNC &&
+ log_write_up_to(lsn, !my_disable_sync &&
(srv_flush_log_at_trx_commit & 1), callback);
}
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 33b1f93ff65..3b9c598e745 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -588,13 +588,8 @@ static uint16_t trx_undo_header_create(buf_block_t *undo_page, trx_id_t trx_id,
undo_page->page.frame) != 0))
mtr->memset(undo_page, free + TRX_UNDO_TRX_NO, 8, 0);
- /* Write TRX_UNDO_NEEDS_PURGE=1 and TRX_UNDO_LOG_START. */
- mach_write_to_2(buf, 1);
- memcpy_aligned<2>(buf + 2, start, 2);
- static_assert(TRX_UNDO_NEEDS_PURGE + 2 == TRX_UNDO_LOG_START,
- "compatibility");
- mtr->memcpy<mtr_t::MAYBE_NOP>(*undo_page, free + TRX_UNDO_NEEDS_PURGE +
- undo_page->page.frame, buf, 4);
+ mtr->memcpy<mtr_t::MAYBE_NOP>(*undo_page, free + TRX_UNDO_LOG_START +
+ undo_page->page.frame, start, 2);
/* Initialize all fields TRX_UNDO_XID_EXISTS to TRX_UNDO_HISTORY_NODE. */
if (prev_log)
{
diff --git a/storage/maria/CMakeLists.txt b/storage/maria/CMakeLists.txt
index f55d78f0162..033e88bb8e5 100644
--- a/storage/maria/CMakeLists.txt
+++ b/storage/maria/CMakeLists.txt
@@ -135,4 +135,5 @@ IF(TARGET s3)
TARGET_LINK_LIBRARIES(aria_s3_copy aria myisam mysys mysys_ssl ${CURL_LIBRARIES} ${ZLIB_LIBRARY})
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/libmarias3)
ADD_DEFINITIONS(-DWITH_S3_STORAGE_ENGINE)
+ INSTALL_MANPAGES(s3-engine aria_s3_copy.1)
ENDIF()
diff --git a/storage/maria/aria_chk.c b/storage/maria/aria_chk.c
index 7d5598f06b5..61821ec9099 100644
--- a/storage/maria/aria_chk.c
+++ b/storage/maria/aria_chk.c
@@ -15,6 +15,7 @@
/* Describe, check and repair of MARIA tables */
+#define VER "1.3"
#include "ma_fulltext.h"
#include <myisamchk.h>
#include <my_bit.h>
@@ -25,6 +26,7 @@
/* Remove next line if you want aria_chk to produce a stack trace */
#undef HAVE_BACKTRACE
#include <my_stacktrace.h>
+#include <welcome_copyright_notice.h>
static uint decode_bits;
static char **default_argv;
@@ -79,7 +81,6 @@ static char default_open_errmsg[]= "%d when opening Aria table '%s'";
static char default_close_errmsg[]= "%d when closing Aria table '%s'";
static void get_options(int *argc,char * * *argv);
-static void print_version(void);
static void usage(void);
static int maria_chk(HA_CHECK *param, char *filename);
static void descript(HA_CHECK *param, register MARIA_HA *info, char *name);
@@ -471,13 +472,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver 1.3 for %s on %s\n", my_progname, SYSTEM_TYPE,
- MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
@@ -1624,6 +1618,8 @@ static void descript(HA_CHECK *param, register MARIA_HA *info, char *name)
pos=strmov(pos,"sorted index pages,");
if (!(share->state.changed & STATE_NOT_ZEROFILLED))
pos=strmov(pos,"zerofilled,");
+ if (test_all_bits(share->state.changed, (STATE_NOT_ZEROFILLED | STATE_HAS_LSN)))
+ pos=strmov(pos,"has_lsn,");
if (!(share->state.changed & STATE_NOT_MOVABLE))
pos=strmov(pos,"movable,");
if (have_control_file && (share->state.changed & STATE_MOVED))
diff --git a/storage/maria/aria_dump_log.c b/storage/maria/aria_dump_log.c
index e64c97fcda3..4317e1b6f3b 100644
--- a/storage/maria/aria_dump_log.c
+++ b/storage/maria/aria_dump_log.c
@@ -13,8 +13,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
+#define VER "1.1"
#include "maria_def.h"
#include <my_getopt.h>
+#include <welcome_copyright_notice.h>
+
extern void translog_example_table_init();
static const char *load_default_groups[]= { "aria_dump_log",0 };
static void get_options(int *argc,char * * *argv);
@@ -64,13 +67,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver 1.1 for %s on %s\n",
- my_progname_short, SYSTEM_TYPE, MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/storage/maria/aria_pack.c b/storage/maria/aria_pack.c
index 40e7e399613..eab4d512e8b 100644
--- a/storage/maria/aria_pack.c
+++ b/storage/maria/aria_pack.c
@@ -19,6 +19,7 @@
#define USE_MY_FUNC /* We need at least my_malloc */
#endif
+#define VER "1.0"
#include "maria_def.h"
#include "trnman_public.h"
#include "trnman.h"
@@ -33,6 +34,7 @@
#endif
#include <my_getopt.h>
#include <my_handler_errors.h>
+#include <welcome_copyright_notice.h>
#if SIZEOF_LONG_LONG > 4
#define BITS_SAVED 64
@@ -353,12 +355,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver 1.0 for %s on %s\n", my_progname, SYSTEM_TYPE, MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/storage/maria/aria_read_log.c b/storage/maria/aria_read_log.c
index c0c76ed5590..85a6f4a5e97 100644
--- a/storage/maria/aria_read_log.c
+++ b/storage/maria/aria_read_log.c
@@ -139,6 +139,12 @@ int main(int argc, char **argv)
if (opt_display_only)
printf("You are using --display-only, NOTHING will be written to disk\n");
+ if (translog_get_horizon() == LSN_IMPOSSIBLE)
+ {
+ fprintf(stdout, "The transaction log is empty\n");
+ goto end;
+ }
+
lsn= translog_first_lsn_in_log();
if (lsn == LSN_ERROR)
{
@@ -147,7 +153,8 @@ int main(int argc, char **argv)
}
if (lsn == LSN_IMPOSSIBLE)
{
- fprintf(stdout, "The transaction log is empty\n");
+ fprintf(stdout, "The transaction log is empty\n");
+ goto end;
}
if (opt_start_from_checkpoint && !opt_start_from_lsn &&
last_checkpoint_lsn != LSN_IMPOSSIBLE)
@@ -300,7 +307,7 @@ static struct my_option my_long_options[] =
static void print_version(void)
{
- printf("%s Ver 1.5 for %s on %s\n",
+ printf("%s Ver 1.6 for %s on %s\n",
my_progname_short, SYSTEM_TYPE, MACHINE_TYPE);
}
@@ -308,7 +315,7 @@ static void print_version(void)
static void usage(void)
{
print_version();
- puts("Copyright (C) 2007 MySQL AB, 2009-2011 Monty Program Ab, 2020 MariaDB Corporation");
+ puts("Copyright (C) 2007 MySQL AB, 2009-2011 Monty Program Ab, 2022 MariaDB Corporation");
puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,");
puts("and you are welcome to modify and redistribute it under the GPL license\n");
diff --git a/man/aria_s3_copy.1 b/storage/maria/aria_s3_copy.1
index 5844d5a76f0..5844d5a76f0 100644
--- a/man/aria_s3_copy.1
+++ b/storage/maria/aria_s3_copy.1
diff --git a/storage/maria/aria_s3_copy.cc b/storage/maria/aria_s3_copy.cc
index 77c41ba4572..5c8c2abc7db 100644
--- a/storage/maria/aria_s3_copy.cc
+++ b/storage/maria/aria_s3_copy.cc
@@ -17,6 +17,7 @@
Allow copying of Aria tables to and from S3 and also deleting them from S3
*/
+#define VER "1.0"
#include <my_global.h>
#include <m_string.h>
#include "maria_def.h"
@@ -28,6 +29,7 @@
#include <zlib.h>
#include <libmarias3/marias3.h>
#include "s3_func.h"
+#include <welcome_copyright_notice.h>
static const char *op_types[]= {"to_s3", "from_s3", "delete_from_s3", NullS};
static TYPELIB op_typelib= {array_elements(op_types)-1,"", op_types, NULL};
@@ -109,12 +111,6 @@ static struct my_option my_long_options[] =
static bool get_database_from_path(char *to, size_t to_length, const char *path);
-static void print_version(void)
-{
- printf("%s Ver 1.0 for %s on %s\n", my_progname, SYSTEM_TYPE,
- MACHINE_TYPE);
-}
-
static void usage(void)
{
print_version();
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index c245dcea036..f82823dba2b 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -1094,21 +1094,52 @@ ulong ha_maria::index_flags(uint inx, uint part, bool all_parts) const
}
else
{
- flags= HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
- HA_READ_ORDER | HA_KEYREAD_ONLY | HA_DO_INDEX_COND_PUSHDOWN;
+ flags= (HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
+ HA_READ_ORDER | HA_KEYREAD_ONLY | HA_DO_INDEX_COND_PUSHDOWN |
+ HA_DO_RANGE_FILTER_PUSHDOWN);
}
return flags;
}
-double ha_maria::scan_time()
+/*
+ Update costs that are unique for this TABLE instance
+*/
+
+void ha_maria::update_optimizer_costs(OPTIMIZER_COSTS *costs)
{
- if (file->s->data_file_type == BLOCK_RECORD)
- return (ulonglong2double(stats.data_file_length - file->s->block_size) /
- file->s->block_size) + 2;
- return handler::scan_time();
+ /*
+    Default costs for Aria with the BLOCK_RECORD format are the same as the
+    MariaDB default costs.
+ */
+ if (file->s->data_file_type != BLOCK_RECORD)
+ {
+ /*
+ MyISAM format row lookup costs are slow as the row data is on a not
+ cached file. Costs taken from ha_myisam.cc
+ */
+ costs->row_next_find_cost= 0.000063539;
+ costs->row_lookup_cost= 0.001014818;
+ }
}
+
+IO_AND_CPU_COST ha_maria::rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST cost= handler::rnd_pos_time(rows);
+ /* file may be 0 if this is an internal temporary file that is not yet opened */
+ if (file && file->s->data_file_type != BLOCK_RECORD)
+ {
+ /*
+ Row data is not cached. costs.row_lookup_cost includes the cost of
+      reading the row from the file system (probably cached by the OS).
+ */
+ cost.io= 0;
+ }
+ return cost;
+}
+
+
/*
We need to be able to store at least 2 keys on an index page as the
  splitting algorithm depends on this. (With only one key on a page
@@ -2505,10 +2536,12 @@ int ha_maria::index_read_idx_map(uchar * buf, uint index, const uchar * key,
end_range= NULL;
if (index == pushed_idx_cond_keyno)
ma_set_index_cond_func(file, handler_index_cond_check, this);
+ if (pushed_rowid_filter && handler_rowid_filter_is_active(this))
+ ma_set_rowid_filter_func(file, handler_rowid_filter_check, this);
error= maria_rkey(file, buf, index, key, keypart_map, find_flag);
- ma_set_index_cond_func(file, NULL, 0);
+ ma_reset_index_filter_functions(file);
return error;
}
@@ -2582,18 +2615,22 @@ int ha_maria::index_next_same(uchar * buf,
int ha_maria::index_init(uint idx, bool sorted)
{
- active_index=idx;
+ active_index= idx;
if (pushed_idx_cond_keyno == idx)
ma_set_index_cond_func(file, handler_index_cond_check, this);
+ if (pushed_rowid_filter && handler_rowid_filter_is_active(this))
+ ma_set_rowid_filter_func(file, handler_rowid_filter_check, this);
return 0;
}
-
int ha_maria::index_end()
{
+ /*
+    in_range_check_pushed_down and pushed_idx_cond_keyno are reset in
+ handler::cancel_pushed_idx_cond()
+ */
active_index=MAX_KEY;
- ma_set_index_cond_func(file, NULL, 0);
- in_range_check_pushed_down= FALSE;
+ ma_reset_index_filter_functions(file);
ds_mrr.dsmrr_close();
return 0;
}
@@ -2707,8 +2744,8 @@ int ha_maria::info(uint flag)
}
}
/*
- Set data_file_name and index_file_name to point at the symlink value
- if table is symlinked (Ie; Real name is not same as generated name)
+ Set data_file_name and index_file_name to point at the symlink value
+    if the table is symlinked (i.e., the real name differs from the generated name)
*/
data_file_name= index_file_name= 0;
fn_format(name_buff, file->s->open_file_name.str, "", MARIA_NAME_DEXT,
@@ -2791,7 +2828,7 @@ int ha_maria::extra(enum ha_extra_function operation)
int ha_maria::reset(void)
{
- ma_set_index_cond_func(file, NULL, 0);
+ ma_reset_index_filter_functions(file);
ds_mrr.dsmrr_close();
if (file->trn)
{
@@ -2825,8 +2862,9 @@ bool ha_maria::auto_repair(int error) const
int ha_maria::delete_all_rows()
{
THD *thd= table->in_use;
- TRN *trn= file->trn;
+ TRN *trn= file->s->now_transactional ? file->trn : (TRN*) 0;
CHECK_UNTIL_WE_FULLY_IMPLEMENTED_VERSIONING("TRUNCATE in WRITE CONCURRENT");
+
#ifdef EXTRA_DEBUG
if (trn && ! (trnman_get_flags(trn) & TRN_STATE_INFO_LOGGED))
{
@@ -2840,8 +2878,7 @@ int ha_maria::delete_all_rows()
If we are under LOCK TABLES, we have to do a commit as
delete_all_rows() can't be rolled back
*/
- if (table->in_use->locked_tables_mode && trn &&
- trnman_has_locked_tables(trn))
+ if (trn && table->in_use->locked_tables_mode && trnman_has_locked_tables(trn))
{
int error;
if ((error= implicit_commit(thd, 1)))
@@ -3849,6 +3886,10 @@ bool ha_maria::is_changed() const
return file->state->changed;
}
+static void aria_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+}
+
static int ha_maria_init(void *p)
{
@@ -3881,6 +3922,7 @@ static int ha_maria_init(void *p)
maria_hton->show_status= maria_show_status;
maria_hton->prepare_for_backup= maria_prepare_for_backup;
maria_hton->end_backup= maria_end_backup;
+ maria_hton->update_optimizer_costs= aria_update_optimizer_costs;
/* TODO: decide if we support Maria being used for log tables */
maria_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES |
@@ -4181,7 +4223,8 @@ int ha_maria::multi_range_read_next(range_id_t *range_info)
ha_rows ha_maria::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost)
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost)
{
/*
This call is here because there is no location where this->table would
@@ -4190,7 +4233,7 @@ ha_rows ha_maria::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
*/
ds_mrr.init(this, table);
return ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges, bufsz,
- flags, cost);
+ flags, limit, cost);
}
ha_rows ha_maria::multi_range_read_info(uint keyno, uint n_ranges, uint keys,
@@ -4241,6 +4284,26 @@ Item *ha_maria::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
return NULL;
}
+bool ha_maria::rowid_filter_push(Rowid_filter* rowid_filter)
+{
+ /* This will be used in index_init() */
+ pushed_rowid_filter= rowid_filter;
+ return false;
+}
+
+
+/* Enable / disable rowid filter depending if it's active or not */
+
+void ha_maria::rowid_filter_changed()
+{
+ if (pushed_rowid_filter && handler_rowid_filter_is_active(this))
+ ma_set_rowid_filter_func(file, handler_rowid_filter_check, this);
+ else
+ ma_set_rowid_filter_func(file, NULL, this);
+}
+
+
+
/**
  Find record by unique constraint (used in temporary tables)
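Editor's note: the ha_maria.cc hunks above wire the optimizer's rowid filter into Aria in three places: rowid_filter_push() only remembers the filter, index_init()/index_read_idx_map() install the check callback when the filter is active, and rowid_filter_changed() toggles it later. The sketch below restates only that push / install / toggle pattern with invented stand-in types; it is not the MariaDB handler API.

// Sketch of the push / install / toggle pattern used for the rowid filter.
#include <cstdio>

using check_fn = bool (*)(void *arg);

struct engine_file {                       // stand-in for MARIA_HA
  check_fn rowid_filter_func = nullptr;
  void    *rowid_filter_arg  = nullptr;
};

static void set_rowid_filter(engine_file *f, check_fn fn, void *arg)
{
  f->rowid_filter_func = fn;               // mirrors ma_set_rowid_filter_func()
  f->rowid_filter_arg  = arg;
}

struct handler_sketch {
  engine_file *file;
  void        *pushed_filter = nullptr;
  bool         filter_active = false;

  static bool filter_check(void *) { return true; }   // placeholder check

  void rowid_filter_push(void *filter) { pushed_filter = filter; }  // remember only

  void index_init()                        // install at scan start if active
  {
    if (pushed_filter && filter_active)
      set_rowid_filter(file, filter_check, this);
  }

  void rowid_filter_changed()              // toggle when the optimizer flips it
  {
    set_rowid_filter(file,
                     (pushed_filter && filter_active) ? filter_check : nullptr,
                     this);
  }
};

int main()
{
  engine_file f;
  handler_sketch h{&f};
  int dummy;
  h.rowid_filter_push(&dummy);
  h.filter_active = true;
  h.index_init();
  std::printf("filter installed: %s\n", f.rowid_filter_func ? "yes" : "no");
}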
diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h
index 2b8b5dc9742..009e8ca5fe7 100644
--- a/storage/maria/ha_maria.h
+++ b/storage/maria/ha_maria.h
@@ -77,8 +77,6 @@ public:
{ return max_supported_key_length(); }
enum row_type get_row_type() const override final;
void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share) override final;
- virtual double scan_time() override final;
-
int open(const char *name, int mode, uint test_if_locked) override;
int close(void) override final;
int write_row(const uchar * buf) override;
@@ -114,6 +112,8 @@ public:
int remember_rnd_pos() override final;
int restart_rnd_next(uchar * buf) override final;
void position(const uchar * record) override final;
+ void update_optimizer_costs(OPTIMIZER_COSTS *costs) override final;
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) override final;
int info(uint) override final;
int info(uint, my_bool);
int extra(enum ha_extra_function operation) override final;
@@ -175,7 +175,8 @@ public:
ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost) override final;
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost) override final;
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
uint *flags, Cost_estimate *cost) override final;
@@ -183,6 +184,8 @@ public:
/* Index condition pushdown implementation */
Item *idx_cond_push(uint keyno, Item* idx_cond) override final;
+ bool rowid_filter_push(Rowid_filter* rowid_filter) override;
+ void rowid_filter_changed() override;
int find_unique_row(uchar *record, uint unique_idx) override final;
diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c
index 61fe4f9d080..ec1b0955655 100644
--- a/storage/maria/ma_bitmap.c
+++ b/storage/maria/ma_bitmap.c
@@ -1172,6 +1172,7 @@ static my_bool move_to_next_bitmap(MARIA_HA *info, MARIA_FILE_BITMAP *bitmap)
{
pgcache_page_no_t page= bitmap->page;
MARIA_STATE_INFO *state= &info->s->state;
+ my_bool res;
DBUG_ENTER("move_to_next_bitmap");
if (state->first_bitmap_with_space != ~(pgcache_page_no_t) 0 &&
@@ -1186,7 +1187,8 @@ static my_bool move_to_next_bitmap(MARIA_HA *info, MARIA_FILE_BITMAP *bitmap)
page+= bitmap->pages_covered;
DBUG_ASSERT(page % bitmap->pages_covered == 0);
}
- DBUG_RETURN(_ma_change_bitmap_page(info, bitmap, page));
+ res= _ma_change_bitmap_page(info, bitmap, page);
+ DBUG_RETURN(res);
}
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index 98ef5c21e55..c56721a2359 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -5284,6 +5284,7 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info)
{
MARIA_SHARE *share= info->s;
myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0);
+ my_bool res;
DBUG_ENTER("_ma_scan_init_block_record");
DBUG_ASSERT(info->dfile.file == share->bitmap.file.file);
@@ -5310,7 +5311,8 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info)
_ma_scan_block_record()), we may miss recently inserted rows (bitmap page
in page cache would be too old).
*/
- DBUG_RETURN(_ma_bitmap_flush(info->s));
+ res= _ma_bitmap_flush(info->s);
+ DBUG_RETURN(res);
}
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index f470d3691c1..45d6df7a63a 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -3651,28 +3651,35 @@ err:
int maria_zerofill(HA_CHECK *param, MARIA_HA *info, const char *name)
{
- my_bool error, reenable_logging,
+ my_bool error= 0, reenable_logging,
zero_lsn= !(param->testflag & T_ZEROFILL_KEEP_LSN);
MARIA_SHARE *share= info->s;
DBUG_ENTER("maria_zerofill");
if ((reenable_logging= share->now_transactional))
_ma_tmp_disable_logging_for_table(info, 0);
- if (!(error= (maria_zerofill_index(param, info, name) ||
- maria_zerofill_data(param, info, name) ||
- _ma_set_uuid(info->s, 0))))
+
+ if (share->state.changed & (STATE_NOT_ZEROFILLED | (zero_lsn ? STATE_HAS_LSN : 0)))
+ error= (maria_zerofill_index(param, info, name) ||
+ maria_zerofill_data(param, info, name));
+ if (!error)
+ error= _ma_set_uuid(info->s, 0);
+
+ if (!error)
{
/*
- Mark that we have done zerofill of data and index. If we zeroed pages'
- LSN, table is movable.
+ Mark that we have done zerofill of data and index. If we zeroed the LSN
+      on the pages, the table is movable.
*/
share->state.changed&= ~STATE_NOT_ZEROFILLED;
if (zero_lsn)
{
- share->state.changed&= ~(STATE_NOT_MOVABLE | STATE_MOVED);
+ share->state.changed&= ~(STATE_NOT_MOVABLE | STATE_MOVED | STATE_HAS_LSN);
/* Table should get new LSNs */
share->state.create_rename_lsn= share->state.is_of_horizon=
share->state.skip_redo_lsn= LSN_NEEDS_NEW_STATE_LSNS;
}
+ else
+ share->state.changed|= STATE_HAS_LSN;
/* Ensure state is later flushed to disk, if within maria_chk */
info->update= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
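Editor's note: the maria_zerofill() change above introduces STATE_HAS_LSN so a repeated zerofill can be skipped when neither STATE_NOT_ZEROFILLED nor (when LSNs are also being cleared) STATE_HAS_LSN is set, and it keeps the flag up to date afterwards. The sketch below restates only that flag arithmetic; the bit values are invented and the real passes are stubbed out.

// Sketch of the zerofill state-flag bookkeeping; bit values are invented.
#include <cstdio>

enum : unsigned {
  STATE_NOT_ZEROFILLED = 1u << 0,
  STATE_NOT_MOVABLE    = 1u << 1,
  STATE_MOVED          = 1u << 2,
  STATE_HAS_LSN        = 1u << 3,
};

// Returns true if the (stubbed) zerofill passes had to run.
static bool zerofill(unsigned &changed, bool zero_lsn)
{
  const unsigned need = STATE_NOT_ZEROFILLED | (zero_lsn ? STATE_HAS_LSN : 0);
  if (!(changed & need))
    return false;                         // nothing to do, skip the passes

  changed &= ~STATE_NOT_ZEROFILLED;       // data and index are now zerofilled
  if (zero_lsn)
    changed &= ~(STATE_NOT_MOVABLE | STATE_MOVED | STATE_HAS_LSN);  // movable
  else
    changed |= STATE_HAS_LSN;             // pages still carry LSNs
  return true;
}

int main()
{
  unsigned changed = STATE_NOT_ZEROFILLED | STATE_NOT_MOVABLE;
  zerofill(changed, /*zero_lsn=*/true);
  std::printf("flags after zerofill: 0x%x\n", changed);
}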
diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c
index 21befb70bd9..237b75b99b7 100644
--- a/storage/maria/ma_control_file.c
+++ b/storage/maria/ma_control_file.c
@@ -104,7 +104,7 @@ one should increment the control file version number.
This LSN serves for the two-checkpoint rule, and also to find the
checkpoint record when doing a recovery.
*/
-LSN last_checkpoint_lsn= LSN_IMPOSSIBLE;
+volatile LSN last_checkpoint_lsn= LSN_IMPOSSIBLE;
uint32 last_logno= FILENO_IMPOSSIBLE;
/**
The maximum transaction id given to a transaction. It is only updated at
diff --git a/storage/maria/ma_control_file.h b/storage/maria/ma_control_file.h
index 40428f665f4..c74957b8322 100644
--- a/storage/maria/ma_control_file.h
+++ b/storage/maria/ma_control_file.h
@@ -37,7 +37,7 @@ C_MODE_START
  LSN of the last checkpoint
(if last_checkpoint_lsn == LSN_IMPOSSIBLE then there was never a checkpoint)
*/
-extern LSN last_checkpoint_lsn;
+extern volatile LSN last_checkpoint_lsn;
/*
Last log number (if last_logno == FILENO_IMPOSSIBLE then there is no log
file yet)
diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c
index 425cb421e22..087100e3d8c 100644
--- a/storage/maria/ma_extra.c
+++ b/storage/maria/ma_extra.c
@@ -510,8 +510,17 @@ void ma_set_index_cond_func(MARIA_HA *info, index_cond_func_t func,
{
info->index_cond_func= func;
info->index_cond_func_arg= func_arg;
+ info->has_cond_pushdown= (info->index_cond_func || info->rowid_filter_func);
}
+void ma_set_rowid_filter_func(MARIA_HA *info,
+ rowid_filter_func_t check_func,
+ void *func_arg)
+{
+ info->rowid_filter_func= check_func;
+ info->rowid_filter_func_arg= func_arg;
+ info->has_cond_pushdown= (info->index_cond_func || info->rowid_filter_func);
+}
/*
Start/Stop Inserting Duplicates Into a Table, WL#1648.
diff --git a/storage/maria/ma_info.c b/storage/maria/ma_info.c
index ddf92654be0..3de6b8b74c5 100644
--- a/storage/maria/ma_info.c
+++ b/storage/maria/ma_info.c
@@ -20,14 +20,6 @@
#include <sys/stat.h>
#endif
- /* Get position to last record */
-
-MARIA_RECORD_POS maria_position(MARIA_HA *info)
-{
- return info->cur_row.lastpos;
-}
-
-
uint maria_max_key_length()
{
uint tmp= (_ma_max_key_length() - 8 - HA_MAX_KEY_SEG*3);
diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c
index d47e8cf715a..1b58c1c12c8 100644
--- a/storage/maria/ma_key.c
+++ b/storage/maria/ma_key.c
@@ -678,22 +678,44 @@ int _ma_read_key_record(MARIA_HA *info, uchar *buf, MARIA_RECORD_POS filepos)
CHECK_OUT_OF_RANGE to indicate that we don't have any active row.
*/
-check_result_t ma_check_index_cond(register MARIA_HA *info, uint keynr,
- uchar *record)
+check_result_t ma_check_index_cond_real(register MARIA_HA *info, uint keynr,
+ uchar *record)
{
check_result_t res= CHECK_POS;
+ DBUG_ASSERT(info->index_cond_func || info->rowid_filter_func);
+
+ if (_ma_put_key_in_record(info, keynr, FALSE, record))
+ {
+ /* Impossible case; Can only happen if bug in code */
+ _ma_print_error(info, HA_ERR_CRASHED, 0);
+ info->cur_row.lastpos= HA_OFFSET_ERROR; /* No active record */
+ my_errno= HA_ERR_CRASHED;
+ return CHECK_ERROR;
+ }
+
if (info->index_cond_func)
{
- if (_ma_put_key_in_record(info, keynr, FALSE, record))
+ if ((res= info->index_cond_func(info->index_cond_func_arg)) ==
+ CHECK_OUT_OF_RANGE)
{
- /* Impossible case; Can only happen if bug in code */
- _ma_print_error(info, HA_ERR_CRASHED, 0);
+ /* We got beyond the end of scanned range */
info->cur_row.lastpos= HA_OFFSET_ERROR; /* No active record */
- my_errno= HA_ERR_CRASHED;
- res= CHECK_ERROR;
+ my_errno= HA_ERR_END_OF_FILE;
+ return res;
}
- else if ((res= info->index_cond_func(info->index_cond_func_arg)) ==
- CHECK_OUT_OF_RANGE)
+ /*
+    If we got an error, an out-of-range condition, or the ICP condition
+    evaluated to FALSE, we don't need to check the Rowid Filter.
+ */
+ if (res != CHECK_POS)
+ return res;
+ }
+
+ /* Check the Rowid Filter, if present */
+ if (info->rowid_filter_func)
+ {
+ if ((res= info->rowid_filter_func(info->rowid_filter_func_arg)) ==
+ CHECK_OUT_OF_RANGE)
{
/* We got beyond the end of scanned range */
info->cur_row.lastpos= HA_OFFSET_ERROR; /* No active record */
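Editor's note: ma_check_index_cond_real() above now performs three steps in a fixed order: copy the key into the record buffer once, evaluate the pushed index condition, and only if that still returns CHECK_POS consult the rowid filter. The sketch below restates that ordering in isolation; the struct, enum values, and callbacks are simplified stand-ins for the Maria types, not the real ones.

// Sketch of the combined ICP + rowid-filter check order from ma_key.c.
#include <cstdio>

enum check_result_t { CHECK_ERROR = -1, CHECK_NEG = 0, CHECK_POS = 1,
                      CHECK_OUT_OF_RANGE = 2 };

using cond_fn = check_result_t (*)(void *arg);

struct index_scan {
  cond_fn index_cond   = nullptr;   // pushed index condition, may be absent
  cond_fn rowid_filter = nullptr;   // pushed rowid filter, may be absent
  void   *arg          = nullptr;
};

static bool key_to_record() { return true; }  // stub for _ma_put_key_in_record()

static check_result_t check_index_cond(const index_scan &s)
{
  if (!key_to_record())
    return CHECK_ERROR;                        // corrupted key: give up

  if (s.index_cond)
  {
    check_result_t res = s.index_cond(s.arg);
    if (res != CHECK_POS)
      return res;                              // error, out of range, or FALSE
  }
  if (s.rowid_filter)
    return s.rowid_filter(s.arg);              // only reached when ICP passed
  return CHECK_POS;
}

int main()
{
  index_scan s;
  s.index_cond   = [](void *) { return CHECK_POS; };
  s.rowid_filter = [](void *) { return CHECK_NEG; };
  std::printf("verdict: %d\n", check_index_cond(s));
}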
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index 8e6426e3aa4..c3615e5271c 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -478,7 +478,7 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args);
static my_bool translog_get_next_chunk(TRANSLOG_SCANNER_DATA *scanner);
static uint32 translog_first_file(TRANSLOG_ADDRESS horizon, int is_protected);
LSN translog_next_LSN(TRANSLOG_ADDRESS addr, TRANSLOG_ADDRESS horizon);
-
+static void translog_free_link(PAGECACHE_BLOCK_LINK *direct_link);
/*
Initialize log_record_type_descriptors
@@ -3116,7 +3116,10 @@ restart:
PAGECACHE_PLAIN_PAGE,
PAGECACHE_LOCK_LEFT_UNLOCKED,
NULL)))
+ {
+ translog_unlock();
DBUG_RETURN(NULL);
+ }
}
else
skipped_data= 0; /* Read after skipped in buffer data */
@@ -3217,6 +3220,11 @@ restart:
PAGECACHE_LOCK_READ :
PAGECACHE_LOCK_LEFT_UNLOCKED),
direct_link);
+ if (!buffer && direct_link)
+ {
+ translog_free_link(*direct_link);
+ *direct_link= 0;
+ }
DBUG_PRINT("info", ("Direct link is assigned to : %p * %p",
direct_link,
(direct_link ? *direct_link : NULL)));
@@ -3786,16 +3794,26 @@ my_bool translog_init_with_table(const char *directory,
}
else if (LSN_OFFSET(last_page) == 0)
{
- if (LSN_FILE_NO(last_page) == 1)
+ if (LSN_FILE_NO(last_page) == 1 ||
+ !translog_is_file(LSN_FILE_NO(last_page-1)))
{
logs_found= 0; /* file #1 has no pages */
DBUG_PRINT("info", ("log found. But is is empty => no log assumed"));
}
else
{
- last_page-= LSN_ONE_FILE;
- if (translog_get_last_page_addr(&last_page, &pageok, 0))
- goto err;
+ do
+ {
+ last_page-= LSN_ONE_FILE;
+ if (translog_get_last_page_addr(&last_page, &pageok, 0))
+ goto err;
+ }
+ while (LSN_OFFSET(last_page) == 0 && LSN_FILE_NO(last_page) >= 1);
+ if (LSN_OFFSET(last_page) == 0)
+ {
+ /* All files have a size less than TRANSLOG_PAGE_SIZE */
+ logs_found= 0;
+ }
}
}
if (logs_found)
@@ -3893,36 +3911,38 @@ my_bool translog_init_with_table(const char *directory,
old_log_was_recovered= 1;
/* This file is not written till the end so it should be last */
last_page= current_file_last_page;
- /* TODO: issue warning */
}
- do
+ if (LSN_OFFSET(current_file_last_page) >= TRANSLOG_PAGE_SIZE)
{
- TRANSLOG_VALIDATOR_DATA data;
- TRANSLOG_PAGE_SIZE_BUFF psize_buff;
- uchar *page;
- data.addr= &current_page;
- if ((page= translog_get_page(&data, psize_buff.buffer, NULL)) == NULL)
- goto err;
- if (data.was_recovered)
+ do
{
- DBUG_PRINT("error", ("file no: %lu (%d) "
- "rec_offset: 0x%lx (%lu) (%d)",
- (ulong) LSN_FILE_NO(current_page),
- (uint3korr(page + 3) !=
- LSN_FILE_NO(current_page)),
- (ulong) LSN_OFFSET(current_page),
- (ulong) (LSN_OFFSET(current_page) /
- TRANSLOG_PAGE_SIZE),
- (uint3korr(page) !=
- LSN_OFFSET(current_page) /
- TRANSLOG_PAGE_SIZE)));
- old_log_was_recovered= 1;
- break;
- }
- old_flags= page[TRANSLOG_PAGE_FLAGS];
- last_valid_page= current_page;
- current_page+= TRANSLOG_PAGE_SIZE; /* increase offset */
- } while (current_page <= current_file_last_page);
+ TRANSLOG_VALIDATOR_DATA data;
+ TRANSLOG_PAGE_SIZE_BUFF psize_buff;
+ uchar *page;
+ data.addr= &current_page;
+ if ((page= translog_get_page(&data, psize_buff.buffer, NULL)) == NULL)
+ goto err;
+ if (data.was_recovered)
+ {
+ DBUG_PRINT("error", ("file no: %lu (%d) "
+ "rec_offset: 0x%lx (%lu) (%d)",
+ (ulong) LSN_FILE_NO(current_page),
+ (uint3korr(page + 3) !=
+ LSN_FILE_NO(current_page)),
+ (ulong) LSN_OFFSET(current_page),
+ (ulong) (LSN_OFFSET(current_page) /
+ TRANSLOG_PAGE_SIZE),
+ (uint3korr(page) !=
+ LSN_OFFSET(current_page) /
+ TRANSLOG_PAGE_SIZE)));
+ old_log_was_recovered= 1;
+ break;
+ }
+ old_flags= page[TRANSLOG_PAGE_FLAGS];
+ last_valid_page= current_page;
+ current_page+= TRANSLOG_PAGE_SIZE; /* increase offset */
+ } while (current_page <= current_file_last_page);
+ }
current_page+= LSN_ONE_FILE;
current_page= LSN_REPLACE_OFFSET(current_page, TRANSLOG_PAGE_SIZE);
} while (LSN_FILE_NO(current_page) <= LSN_FILE_NO(last_page) &&
@@ -4014,7 +4034,7 @@ my_bool translog_init_with_table(const char *directory,
}
DBUG_PRINT("info", ("Logs found: %d was recovered: %d",
logs_found, old_log_was_recovered));
- if (!logs_found)
+ if (!logs_found && !readonly)
{
TRANSLOG_FILE *file= (TRANSLOG_FILE*)my_malloc(PSI_INSTRUMENT_ME,
sizeof(TRANSLOG_FILE), MYF(MY_WME));
@@ -4064,6 +4084,10 @@ my_bool translog_init_with_table(const char *directory,
translog_start_buffer(log_descriptor.buffers, &log_descriptor.bc, 0);
translog_new_page_header(&log_descriptor.horizon, &log_descriptor.bc);
}
+ else if (readonly && !logs_found)
+ {
+ log_descriptor.horizon= LSN_IMPOSSIBLE;
+ }
/* all LSNs that are on disk are flushed */
log_descriptor.log_start= log_descriptor.sent_to_disk=
@@ -4145,21 +4169,24 @@ my_bool translog_init_with_table(const char *directory,
uint32 file_no= LSN_FILE_NO(page_addr);
my_bool last_page_ok;
/* it is beginning of the current file */
- if (unlikely(file_no == 1))
+ do
{
- /*
- It is beginning of the log => there is no LSNs in the log =>
- There is no harm in leaving it "as-is".
+ if (unlikely(file_no == 1))
+ {
+ /*
+        It is the beginning of the log => there are no LSNs in the log =>
+ There is no harm in leaving it "as-is".
*/
- log_descriptor.previous_flush_horizon= log_descriptor.horizon;
- DBUG_PRINT("info", ("previous_flush_horizon: " LSN_FMT,
- LSN_IN_PARTS(log_descriptor.
+ log_descriptor.previous_flush_horizon= log_descriptor.horizon;
+ DBUG_PRINT("info", ("previous_flush_horizon: " LSN_FMT,
+ LSN_IN_PARTS(log_descriptor.
previous_flush_horizon)));
- DBUG_RETURN(0);
- }
- file_no--;
- page_addr= MAKE_LSN(file_no, TRANSLOG_PAGE_SIZE);
- translog_get_last_page_addr(&page_addr, &last_page_ok, 0);
+ DBUG_RETURN(0);
+ }
+ file_no--;
+ page_addr= MAKE_LSN(file_no, TRANSLOG_PAGE_SIZE);
+ translog_get_last_page_addr(&page_addr, &last_page_ok, 0);
+ } while (LSN_OFFSET(page_addr) == 0);
/* page should be OK as it is not the last file */
DBUG_ASSERT(last_page_ok);
}
@@ -6905,17 +6932,19 @@ translog_get_next_chunk(TRANSLOG_SCANNER_DATA *scanner)
/* if it is log end it have to be caught before */
DBUG_ASSERT(LSN_FILE_NO(scanner->horizon) >
LSN_FILE_NO(scanner->page_addr));
- scanner->page_addr+= LSN_ONE_FILE;
- scanner->page_addr= LSN_REPLACE_OFFSET(scanner->page_addr,
- TRANSLOG_PAGE_SIZE);
- if (translog_scanner_set_last_page(scanner))
- DBUG_RETURN(1);
+ do
+ {
+ scanner->page_addr+= LSN_ONE_FILE;
+ scanner->page_addr= LSN_REPLACE_OFFSET(scanner->page_addr,
+ TRANSLOG_PAGE_SIZE);
+ if (translog_scanner_set_last_page(scanner))
+ DBUG_RETURN(1);
+ } while (!LSN_OFFSET(scanner->last_file_page));
}
else
{
scanner->page_addr+= TRANSLOG_PAGE_SIZE; /* offset increased */
}
-
if (translog_scanner_get_page(scanner))
DBUG_RETURN(1);
@@ -6926,7 +6955,9 @@ translog_get_next_chunk(TRANSLOG_SCANNER_DATA *scanner)
scanner->page_offset= 0;
DBUG_RETURN(0);
}
+#ifdef CHECK_EMPTY_PAGE
DBUG_ASSERT(scanner->page[scanner->page_offset] != TRANSLOG_FILLER);
+#endif
}
DBUG_RETURN(0);
}
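Editor's note: the ma_loghandler.c hunks above wrap several places in do/while loops so that log files whose last page address has a zero offset (files shorter than one TRANSLOG_PAGE_SIZE) are stepped over instead of being treated as the end of the log. The sketch below shows only that skip-empty-files loop in isolation, using a plain vector in place of the on-disk files; the function name and data layout are invented for the example.

// Sketch of "step back over empty log files" from translog_init_with_table().
#include <cstdio>
#include <vector>

// Each entry is the byte offset of the last complete page in that log file;
// 0 means the file is shorter than one TRANSLOG_PAGE_SIZE page.
static int last_useful_file(const std::vector<unsigned> &last_page_offset)
{
  int file_no = static_cast<int>(last_page_offset.size());   // 1-based
  while (file_no >= 1 && last_page_offset[file_no - 1] == 0)
    --file_no;                       // skip files with no complete pages
  return file_no;                    // 0 means no usable log at all
}

int main()
{
  // files #1..#4: the last two were created but never grew past the header
  std::vector<unsigned> offsets{8192, 16384, 0, 0};
  std::printf("last usable log file: #%d\n", last_useful_file(offsets));
}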
diff --git a/storage/maria/ma_loghandler.h b/storage/maria/ma_loghandler.h
index 3e5c58a8053..abe85a12727 100644
--- a/storage/maria/ma_loghandler.h
+++ b/storage/maria/ma_loghandler.h
@@ -25,7 +25,11 @@
/* minimum possible transaction log size */
#define TRANSLOG_MIN_FILE_SIZE (8*MB)
/* transaction log default flags (TODO: make it a global variable) */
+#ifdef HAVE_DBUG_TRANSLOG_CRC
+#define TRANSLOG_DEFAULT_FLAGS IF_DBUG(TRANSLOG_PAGE_CRC,0)
+#else
#define TRANSLOG_DEFAULT_FLAGS 0
+#endif
/*
Transaction log flags.
diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c
index 144b10a86da..c4c85d0bdd0 100644
--- a/storage/maria/ma_pagecache.c
+++ b/storage/maria/ma_pagecache.c
@@ -3876,7 +3876,7 @@ restart:
{
pagecache_pthread_mutex_unlock(&pagecache->cache_lock);
DBUG_ASSERT(0);
- return (uchar*) 0;
+ DBUG_RETURN((uchar*) 0);
}
}
/*
@@ -5227,7 +5227,7 @@ int flush_pagecache_blocks_with_filter(PAGECACHE *pagecache,
{
int res;
DBUG_ENTER("flush_pagecache_blocks_with_filter");
- DBUG_PRINT("enter", ("pagecache: %p", pagecache));
+ DBUG_PRINT("enter", ("pagecache: %p fd: %di", pagecache, file->file));
if (pagecache->disk_blocks <= 0)
DBUG_RETURN(0);
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index 006c8bef672..90d0ed3c708 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -133,7 +133,7 @@ static void new_transaction(uint16 sid, TrID long_id, LSN undo_lsn,
static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id);
static int new_page(uint32 fileid, pgcache_page_no_t pageid, LSN rec_lsn,
struct st_dirty_page *dirty_page);
-static int close_all_tables(void);
+static int close_all_tables(my_bool force_end_newline);
static my_bool close_one_table(const char *name, TRANSLOG_ADDRESS addr);
static void print_redo_phase_progress(TRANSLOG_ADDRESS addr);
static void delete_all_transactions();
@@ -467,7 +467,7 @@ int maria_apply_log(LSN from_lsn, LSN end_redo_lsn, LSN end_undo_lsn,
we don't use maria_panic() because it would maria_end(), and Recovery does
not want that (we want to keep some modules initialized for runtime).
*/
- if (close_all_tables())
+ if (close_all_tables(0))
{
ma_message_no_user(0, "closing of tables failed");
goto err;
@@ -495,6 +495,8 @@ int maria_apply_log(LSN from_lsn, LSN end_redo_lsn, LSN end_undo_lsn,
/* No dirty pages, all tables are closed, no active transactions, save: */
if (ma_checkpoint_execute(CHECKPOINT_FULL, FALSE))
goto err;
+ tprint(tracef, "checkpoint done at " LSN_FMT "\n",
+ LSN_IN_PARTS(last_checkpoint_lsn));
}
goto end;
@@ -505,7 +507,7 @@ err2:
delete_all_transactions();
if (!abort_message_printed)
error= 1;
- if (close_all_tables())
+ if (close_all_tables(1))
{
ma_message_no_user(0, "closing of tables failed");
}
@@ -3472,7 +3474,7 @@ static int new_page(uint32 fileid, pgcache_page_no_t pageid, LSN rec_lsn,
}
-static int close_all_tables(void)
+static int close_all_tables(my_bool force_end_newline)
{
int error= 0;
uint count= 0;
@@ -3537,7 +3539,7 @@ static int close_all_tables(void)
}
}
end:
- if (recovery_message_printed == REC_MSG_FLUSH)
+ if (recovery_message_printed == REC_MSG_FLUSH && (force_end_newline || error))
{
fputc('\n', stderr);
fflush(stderr);
diff --git a/storage/maria/ma_recovery_util.c b/storage/maria/ma_recovery_util.c
index fe43d812600..b8123c422c1 100644
--- a/storage/maria/ma_recovery_util.c
+++ b/storage/maria/ma_recovery_util.c
@@ -87,7 +87,7 @@ void eprint(FILE *trace_file __attribute__ ((unused)),
if (!trace_file)
trace_file= stderr;
- if (procent_printed)
+ if (procent_printed && trace_file == stderr)
{
procent_printed= 0;
/* In silent mode, print on another line than the 0% 10% 20% line */
diff --git a/storage/maria/ma_rkey.c b/storage/maria/ma_rkey.c
index 8cd82e1c6fc..7e43ed4befa 100644
--- a/storage/maria/ma_rkey.c
+++ b/storage/maria/ma_rkey.c
@@ -120,6 +120,7 @@ int maria_rkey(MARIA_HA *info, uchar *buf, int inx, const uchar *key_data,
/* The key references a concurrently inserted record. */
if (search_flag == HA_READ_KEY_EXACT &&
+ (keyinfo->flag & HA_NOSAME) &&
last_used_keyseg == keyinfo->seg + keyinfo->keysegs)
{
/* Simply ignore the key if it matches exactly. (Bug #29838) */
diff --git a/storage/maria/ma_scan.c b/storage/maria/ma_scan.c
index 5f2945a3078..3e789489090 100644
--- a/storage/maria/ma_scan.c
+++ b/storage/maria/ma_scan.c
@@ -48,10 +48,12 @@ int maria_scan_init(register MARIA_HA *info)
int maria_scan(MARIA_HA *info, uchar *record)
{
+ int res;
DBUG_ENTER("maria_scan");
/* Init all but update-flag */
info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
- DBUG_RETURN((*info->s->scan)(info, record, info->cur_row.nextpos, 1));
+ res= (*info->s->scan)(info, record, info->cur_row.nextpos, 1);
+ DBUG_RETURN(res);
}
diff --git a/storage/maria/ma_write.c b/storage/maria/ma_write.c
index 1dbee5d744a..88f3d22c205 100644
--- a/storage/maria/ma_write.c
+++ b/storage/maria/ma_write.c
@@ -428,14 +428,15 @@ err2:
my_bool _ma_ck_write(MARIA_HA *info, MARIA_KEY *key)
{
+ my_bool tmp;
DBUG_ENTER("_ma_ck_write");
if (info->bulk_insert &&
is_tree_inited(&info->bulk_insert[key->keyinfo->key_nr]))
- {
- DBUG_RETURN(_ma_ck_write_tree(info, key));
- }
- DBUG_RETURN(_ma_ck_write_btree(info, key));
+ tmp= _ma_ck_write_tree(info, key);
+ else
+ tmp= _ma_ck_write_btree(info, key);
+ DBUG_RETURN(tmp);
} /* _ma_ck_write */
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index c7aef97072b..dc164dfce14 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -43,6 +43,14 @@
C_MODE_START
+#ifdef _WIN32
+/*
+ We cannot use mmap() on Windows with Aria as mmap() can cause file
+ size to increase in _ma_dynmap_file(). The extra \0 data causes
+ the file to be regarded as corrupted.
+*/
+#undef HAVE_MMAP
+#endif
/*
Limit max keys according to HA_MAX_POSSIBLE_KEY; See myisamchk.h for details
*/
@@ -213,7 +221,6 @@ extern int maria_rsame_with_pos(MARIA_HA *file, uchar *record,
extern int maria_update(MARIA_HA *file, const uchar *old,
const uchar *new_record);
extern int maria_write(MARIA_HA *file, const uchar *buff);
-extern MARIA_RECORD_POS maria_position(MARIA_HA *file);
extern int maria_status(MARIA_HA *info, MARIA_INFO *x, uint flag);
extern int maria_lock_database(MARIA_HA *file, int lock_type);
extern int maria_delete_table(const char *name);
@@ -1011,6 +1018,7 @@ struct st_maria_handler
my_bool switched_transactional;
/* If transaction will autocommit */
my_bool autocommit;
+ my_bool has_cond_pushdown;
#ifdef _WIN32
my_bool owned_by_merge; /* This Maria table is part of a merge union */
#endif
@@ -1022,6 +1030,8 @@ struct st_maria_handler
my_bool create_unique_index_by_sort;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
+ rowid_filter_func_t rowid_filter_func; /* rowid filter check function */
+ void *rowid_filter_func_arg; /* parameter for the func */
};
/* Table options for the Aria and S3 storage engine */
@@ -1063,6 +1073,7 @@ struct ha_table_option_struct
#define STATE_IN_REPAIR 1024U /* We are running repair on table */
#define STATE_CRASHED_PRINTED 2048U
#define STATE_DATA_FILE_FULL 4096U
+#define STATE_HAS_LSN 8192U /* Some page still has LSN */
#define STATE_CRASHED_FLAGS (STATE_CRASHED | STATE_CRASHED_ON_REPAIR | STATE_CRASHED_PRINTED)
@@ -1346,7 +1357,11 @@ extern int _ma_read_rnd_no_record(MARIA_HA *info, uchar *buf,
MARIA_RECORD_POS filepos,
my_bool skip_deleted_blocks);
my_off_t _ma_no_keypos_to_recpos(MARIA_SHARE *share, my_off_t pos);
-
+/* Get the position of the last record */
+static inline MARIA_RECORD_POS maria_position(MARIA_HA *info)
+{
+ return info->cur_row.lastpos;
+}
extern my_bool _ma_ck_write(MARIA_HA *info, MARIA_KEY *key);
extern my_bool _ma_enlarge_root(MARIA_HA *info, MARIA_KEY *key,
MARIA_RECORD_POS *root);
@@ -1733,7 +1748,25 @@ extern my_bool maria_flush_log_for_page_none(PAGECACHE_IO_HOOK_ARGS *args);
extern PAGECACHE *maria_log_pagecache;
extern void ma_set_index_cond_func(MARIA_HA *info, index_cond_func_t func,
void *func_arg);
-check_result_t ma_check_index_cond(MARIA_HA *info, uint keynr, uchar *record);
+extern void ma_set_rowid_filter_func(MARIA_HA *info,
+ rowid_filter_func_t check_func,
+ void *func_arg);
+static inline void ma_reset_index_filter_functions(MARIA_HA *info)
+{
+ info->index_cond_func= NULL;
+ info->rowid_filter_func= NULL;
+ info->has_cond_pushdown= 0;
+}
+check_result_t ma_check_index_cond_real(MARIA_HA *info, uint keynr,
+ uchar *record);
+static inline check_result_t ma_check_index_cond(MARIA_HA *info, uint keynr,
+ uchar *record)
+{
+ if (!info->has_cond_pushdown)
+ return CHECK_POS;
+ return ma_check_index_cond_real(info, keynr, record);
+}
+
extern my_bool ma_yield_and_check_if_killed(MARIA_HA *info, int inx);
extern my_bool ma_killed_standalone(MARIA_HA *);
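The new ma_check_index_cond() wrapper and ma_reset_index_filter_functions() make the per-row pushdown check a cheap inline gate: a single has_cond_pushdown flag is tested in the header, and the out-of-line ma_check_index_cond_real() is only entered when an index condition or rowid filter was actually pushed down. A self-contained sketch of that gate pattern (the struct and function names below are illustrative, not the Aria API):

#include <stdio.h>

typedef enum { CHECK_NEG= 0, CHECK_POS= 1 } check_result_t;

struct table_handle
{
  int has_pushdown;                        /* set when a condition is pushed down */
  check_result_t (*cond_func)(void *arg);  /* the pushed-down check, may be NULL  */
  void *cond_arg;
};

/* Out-of-line "real" check, reached only when something was pushed down. */
static check_result_t check_cond_real(struct table_handle *t)
{
  return t->cond_func(t->cond_arg);
}

/* Inline gate: the common no-pushdown case costs one flag test per row. */
static inline check_result_t check_cond(struct table_handle *t)
{
  if (!t->has_pushdown)
    return CHECK_POS;
  return check_cond_real(t);
}

static check_result_t odd_rows_only(void *arg)
{
  return (*(int *) arg) % 2 ? CHECK_POS : CHECK_NEG;
}

int main(void)
{
  int row;
  struct table_handle t= { 1, odd_rows_only, &row };

  for (row= 1; row <= 4; row++)
    printf("row %d: %s\n", row, check_cond(&t) == CHECK_POS ? "keep" : "skip");
  return 0;
}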
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index 7787f8b83b5..85d6473ded3 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -4308,6 +4308,7 @@ int ha_mroonga::wrapper_open(const char *name, int mode, uint open_options)
wrap_handler->set_ha_share_ref(&table->s->ha_share);
#endif
error = wrap_handler->ha_open(table, name, mode, open_options);
+ wrap_handler->set_optimizer_costs(ha_thd());
} else {
if (!(wrap_handler = parent_for_clone->wrap_handler->clone(name,
mem_root_for_clone)))
@@ -12313,6 +12314,7 @@ ha_rows ha_mroonga::wrapper_multi_range_read_info_const(uint keyno,
uint n_ranges,
uint *bufsz,
uint *flags,
+ ha_rows limit,
Cost_estimate *cost)
{
MRN_DBUG_ENTER_METHOD();
@@ -12320,7 +12322,8 @@ ha_rows ha_mroonga::wrapper_multi_range_read_info_const(uint keyno,
KEY *key_info = &(table->key_info[keyno]);
if (mrn_is_geo_key(key_info)) {
rows = handler::multi_range_read_info_const(keyno, seq, seq_init_param,
- n_ranges, bufsz, flags, cost);
+ n_ranges, bufsz, flags, limit,
+ cost);
DBUG_RETURN(rows);
}
MRN_SET_WRAP_SHARE_KEY(share, table->s);
@@ -12329,7 +12332,7 @@ ha_rows ha_mroonga::wrapper_multi_range_read_info_const(uint keyno,
set_pk_bitmap();
rows = wrap_handler->multi_range_read_info_const(keyno, seq, seq_init_param,
n_ranges, bufsz, flags,
- cost);
+ limit, cost);
MRN_SET_BASE_SHARE_KEY(share, table->s);
MRN_SET_BASE_TABLE_KEY(this, table);
DBUG_RETURN(rows);
@@ -12341,20 +12344,21 @@ ha_rows ha_mroonga::storage_multi_range_read_info_const(uint keyno,
uint n_ranges,
uint *bufsz,
uint *flags,
+ ha_rows limit,
Cost_estimate *cost)
{
MRN_DBUG_ENTER_METHOD();
ha_rows rows = handler::multi_range_read_info_const(keyno, seq,
seq_init_param,
n_ranges, bufsz, flags,
- cost);
+ limit, cost);
DBUG_RETURN(rows);
}
ha_rows ha_mroonga::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags,
+ uint *flags, ha_rows limit,
Cost_estimate *cost)
{
MRN_DBUG_ENTER_METHOD();
@@ -12363,11 +12367,11 @@ ha_rows ha_mroonga::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
{
rows = wrapper_multi_range_read_info_const(keyno, seq, seq_init_param,
n_ranges, bufsz,
- flags, cost);
+ flags, limit, cost);
} else {
rows = storage_multi_range_read_info_const(keyno, seq, seq_init_param,
n_ranges, bufsz,
- flags, cost);
+ flags, limit, cost);
}
DBUG_RETURN(rows);
}
@@ -13008,9 +13012,9 @@ int ha_mroonga::truncate()
DBUG_RETURN(error);
}
-double ha_mroonga::wrapper_scan_time()
+IO_AND_CPU_COST ha_mroonga::wrapper_scan_time()
{
- double res;
+ IO_AND_CPU_COST res;
MRN_DBUG_ENTER_METHOD();
MRN_SET_WRAP_SHARE_KEY(share, table->s);
MRN_SET_WRAP_TABLE_KEY(this, table);
@@ -13020,17 +13024,16 @@ double ha_mroonga::wrapper_scan_time()
DBUG_RETURN(res);
}
-double ha_mroonga::storage_scan_time()
+IO_AND_CPU_COST ha_mroonga::storage_scan_time()
{
MRN_DBUG_ENTER_METHOD();
- double time = handler::scan_time();
- DBUG_RETURN(time);
+ DBUG_RETURN(handler::scan_time());
}
-double ha_mroonga::scan_time()
+IO_AND_CPU_COST ha_mroonga::scan_time()
{
MRN_DBUG_ENTER_METHOD();
- double time;
+ IO_AND_CPU_COST time;
if (share->wrapper_mode)
{
time = wrapper_scan_time();
@@ -13040,51 +13043,87 @@ double ha_mroonga::scan_time()
DBUG_RETURN(time);
}
-double ha_mroonga::wrapper_read_time(uint index, uint ranges, ha_rows rows)
+IO_AND_CPU_COST ha_mroonga::wrapper_rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST res;
+ MRN_DBUG_ENTER_METHOD();
+ MRN_SET_WRAP_SHARE_KEY(share, table->s);
+ MRN_SET_WRAP_TABLE_KEY(this, table);
+ res = wrap_handler->rnd_pos_time(rows);
+ MRN_SET_BASE_SHARE_KEY(share, table->s);
+ MRN_SET_BASE_TABLE_KEY(this, table);
+ DBUG_RETURN(res);
+}
+
+IO_AND_CPU_COST ha_mroonga::storage_rnd_pos_time(ha_rows rows)
{
- double res;
+ MRN_DBUG_ENTER_METHOD();
+ IO_AND_CPU_COST time = handler::rnd_pos_time(rows);
+ DBUG_RETURN(time);
+}
+
+
+IO_AND_CPU_COST ha_mroonga::rnd_pos_time(ha_rows rows)
+{
+ MRN_DBUG_ENTER_METHOD();
+ IO_AND_CPU_COST time;
+ if (share->wrapper_mode)
+ {
+ time = wrapper_rnd_pos_time(rows);
+ } else {
+ time = storage_rnd_pos_time(rows);
+ }
+ DBUG_RETURN(time);
+}
+
+
+IO_AND_CPU_COST ha_mroonga::wrapper_keyread_time(uint index, ulong ranges,
+ ha_rows rows, ulonglong blocks)
+{
+ IO_AND_CPU_COST res;
MRN_DBUG_ENTER_METHOD();
if (index < MAX_KEY) {
KEY *key_info = &(table->key_info[index]);
if (mrn_is_geo_key(key_info)) {
- res = handler::read_time(index, ranges, rows);
+ res = handler::keyread_time(index, ranges, rows, blocks);
DBUG_RETURN(res);
}
MRN_SET_WRAP_SHARE_KEY(share, table->s);
MRN_SET_WRAP_TABLE_KEY(this, table);
- res = wrap_handler->read_time(share->wrap_key_nr[index], ranges, rows);
+ res = wrap_handler->keyread_time(share->wrap_key_nr[index], ranges, rows, blocks);
MRN_SET_BASE_SHARE_KEY(share, table->s);
MRN_SET_BASE_TABLE_KEY(this, table);
} else {
MRN_SET_WRAP_SHARE_KEY(share, table->s);
MRN_SET_WRAP_TABLE_KEY(this, table);
- res = wrap_handler->read_time(index, ranges, rows);
+ res = wrap_handler->keyread_time(index, ranges, rows, blocks);
MRN_SET_BASE_SHARE_KEY(share, table->s);
MRN_SET_BASE_TABLE_KEY(this, table);
}
DBUG_RETURN(res);
}
-double ha_mroonga::storage_read_time(uint index, uint ranges, ha_rows rows)
+IO_AND_CPU_COST ha_mroonga::storage_keyread_time(uint index, ulong ranges, ha_rows rows, ulonglong blocks)
{
MRN_DBUG_ENTER_METHOD();
- double time = handler::read_time(index, ranges, rows);
+ IO_AND_CPU_COST time = handler::keyread_time(index, ranges, rows, blocks);
DBUG_RETURN(time);
}
-double ha_mroonga::read_time(uint index, uint ranges, ha_rows rows)
+IO_AND_CPU_COST ha_mroonga::keyread_time(uint index, ulong ranges, ha_rows rows, ulonglong blocks)
{
MRN_DBUG_ENTER_METHOD();
- double time;
+ IO_AND_CPU_COST time;
if (share->wrapper_mode)
{
- time = wrapper_read_time(index, ranges, rows);
+ time = wrapper_keyread_time(index, ranges, rows, blocks);
} else {
- time = storage_read_time(index, ranges, rows);
+ time = storage_keyread_time(index, ranges, rows, blocks);
}
DBUG_RETURN(time);
}
+
#ifdef MRN_HANDLER_HAVE_KEYS_TO_USE_FOR_SCANNING
const key_map *ha_mroonga::wrapper_keys_to_use_for_scanning()
{
diff --git a/storage/mroonga/ha_mroonga.hpp b/storage/mroonga/ha_mroonga.hpp
index 66767899e21..27219ffd158 100644
--- a/storage/mroonga/ha_mroonga.hpp
+++ b/storage/mroonga/ha_mroonga.hpp
@@ -505,7 +505,8 @@ public:
ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost) mrn_override;
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost) mrn_override;
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
uint key_parts,
@@ -531,8 +532,9 @@ public:
int end_bulk_insert() mrn_override;
int delete_all_rows() mrn_override;
int truncate() mrn_override;
- double scan_time() mrn_override;
- double read_time(uint index, uint ranges, ha_rows rows) mrn_override;
+ IO_AND_CPU_COST scan_time() mrn_override;
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) mrn_override;
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows, ulonglong blocks) mrn_override;
#ifdef MRN_HANDLER_HAVE_KEYS_TO_USE_FOR_SCANNING
const key_map *keys_to_use_for_scanning() mrn_override;
#endif
@@ -1056,6 +1058,7 @@ private:
uint n_ranges,
uint *bufsz,
uint *flags,
+ ha_rows limit,
Cost_estimate *cost);
ha_rows storage_multi_range_read_info_const(uint keyno,
RANGE_SEQ_IF *seq,
@@ -1063,6 +1066,7 @@ private:
uint n_ranges,
uint *bufsz,
uint *flags,
+ ha_rows limit,
Cost_estimate *cost);
ha_rows wrapper_multi_range_read_info(uint keyno, uint n_ranges, uint keys,
#ifdef MRN_HANDLER_HAVE_MULTI_RANGE_READ_INFO_KEY_PARTS
@@ -1106,10 +1110,12 @@ private:
int wrapper_truncate_index();
int storage_truncate();
int storage_truncate_index();
- double wrapper_scan_time();
- double storage_scan_time();
- double wrapper_read_time(uint index, uint ranges, ha_rows rows);
- double storage_read_time(uint index, uint ranges, ha_rows rows);
+ IO_AND_CPU_COST wrapper_scan_time();
+ IO_AND_CPU_COST storage_scan_time();
+ IO_AND_CPU_COST wrapper_rnd_pos_time(ha_rows rows);
+ IO_AND_CPU_COST storage_rnd_pos_time(ha_rows rows);
+ IO_AND_CPU_COST wrapper_keyread_time(uint index, ulong ranges, ha_rows rows, ulonglong blocks);
+ IO_AND_CPU_COST storage_keyread_time(uint index, ulong ranges, ha_rows rows, ulonglong blocks);
#ifdef MRN_HANDLER_HAVE_KEYS_TO_USE_FOR_SCANNING
const key_map *wrapper_keys_to_use_for_scanning();
const key_map *storage_keys_to_use_for_scanning();
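Throughout the Mroonga wrappers the old double-valued scan_time()/read_time() methods give way to IO_AND_CPU_COST, which keeps the I/O and CPU parts of an estimate separate so the optimizer can weight them independently. A tiny illustration of working with such a split cost; the io/cpu field layout comes from the diff, while the combining weights are invented for the example:

#include <stdio.h>

/* Two-part cost as introduced by the patch: io and cpu are kept separate. */
typedef struct { double io; double cpu; } IO_AND_CPU_COST;

/* Hypothetical scan estimate: one I/O unit per block, a per-row CPU charge. */
static IO_AND_CPU_COST example_scan_cost(double blocks, double rows,
                                         double row_cpu_cost)
{
  IO_AND_CPU_COST c;
  c.io=  blocks;
  c.cpu= rows * row_cpu_cost;
  return c;
}

int main(void)
{
  IO_AND_CPU_COST c= example_scan_cost(1000.0, 1e6, 0.0001);
  /* Illustrative only: a caller could scale the io part by a disk-latency
     factor before adding the two parts together. */
  double disk_factor= 4.0;
  printf("io=%.1f cpu=%.1f weighted total=%.1f\n",
         c.io, c.cpu, c.io * disk_factor + c.cpu);
  return 0;
}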
diff --git a/storage/mroonga/mysql-test/mroonga/storage/r/optimization_count_skip_index_not_equal.result b/storage/mroonga/mysql-test/mroonga/storage/r/optimization_count_skip_index_not_equal.result
index a1a123e7d5f..837ca2b6381 100644
--- a/storage/mroonga/mysql-test/mroonga/storage/r/optimization_count_skip_index_not_equal.result
+++ b/storage/mroonga/mysql-test/mroonga/storage/r/optimization_count_skip_index_not_equal.result
@@ -9,6 +9,9 @@ INSERT INTO users (age) VALUES (28);
INSERT INTO users (age) VALUES (29);
INSERT INTO users (age) VALUES (29);
INSERT INTO users (age) VALUES (29);
+explain SELECT COUNT(*) FROM users WHERE age <> 29;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE users range age age 5 NULL 4 Using where; Using index
SELECT COUNT(*) FROM users WHERE age <> 29;
COUNT(*)
2
diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/optimization_count_skip_index_not_equal.test b/storage/mroonga/mysql-test/mroonga/storage/t/optimization_count_skip_index_not_equal.test
index 3948d218a69..7e6cf5f510b 100644
--- a/storage/mroonga/mysql-test/mroonga/storage/t/optimization_count_skip_index_not_equal.test
+++ b/storage/mroonga/mysql-test/mroonga/storage/t/optimization_count_skip_index_not_equal.test
@@ -33,6 +33,7 @@ INSERT INTO users (age) VALUES (29);
INSERT INTO users (age) VALUES (29);
INSERT INTO users (age) VALUES (29);
+explain SELECT COUNT(*) FROM users WHERE age <> 29;
SELECT COUNT(*) FROM users WHERE age <> 29;
SHOW STATUS LIKE 'mroonga_count_skip';
diff --git a/storage/mroonga/mysql-test/mroonga/wrapper/r/geometry_contains.result b/storage/mroonga/mysql-test/mroonga/wrapper/r/geometry_contains.result
index 550554eac8c..6dd6dd25f3f 100644
--- a/storage/mroonga/mysql-test/mroonga/wrapper/r/geometry_contains.result
+++ b/storage/mroonga/mysql-test/mroonga/wrapper/r/geometry_contains.result
@@ -154,7 +154,7 @@ id name location_text
select id, name, ST_AsText(location) as location_text from shops
where MBRContains(ST_GeomFromText('LineString(139.7727 35.6684, 139.7038 35.7121)'), location);
id name location_text
+26 kazuya POINT(139.760895 35.673508)
14 tetsuji POINT(139.76857 35.680912)
19 daruma POINT(139.770599 35.681461)
-26 kazuya POINT(139.760895 35.673508)
drop table shops;
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index c0419da7e71..b1b1e8fd57e 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -804,6 +804,17 @@ ulong ha_myisam::index_flags(uint inx, uint part, bool all_parts) const
return flags;
}
+IO_AND_CPU_COST ha_myisam::rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST cost= handler::rnd_pos_time(rows);
+ /*
+    Row data is not cached. costs.row_lookup_cost includes the cost of
+    reading the row from the system (probably cached by the OS).
+ */
+ cost.io= 0;
+ return cost;
+}
+
/* Name is here without an extension */
int ha_myisam::open(const char *name, int mode, uint test_if_locked)
@@ -1960,9 +1971,8 @@ int ha_myisam::index_init(uint idx, bool sorted)
active_index=idx;
if (pushed_idx_cond_keyno == idx)
mi_set_index_cond_func(file, handler_index_cond_check, this);
- if (pushed_rowid_filter)
- mi_set_rowid_filter_func(file, handler_rowid_filter_check,
- handler_rowid_filter_is_active, this);
+ if (pushed_rowid_filter && handler_rowid_filter_is_active(this))
+ mi_set_rowid_filter_func(file, handler_rowid_filter_check, this);
return 0;
}
@@ -1970,11 +1980,10 @@ int ha_myisam::index_init(uint idx, bool sorted)
int ha_myisam::index_end()
{
DBUG_ENTER("ha_myisam::index_end");
- active_index=MAX_KEY;
- //pushed_idx_cond_keyno= MAX_KEY;
+ active_index= MAX_KEY;
mi_set_index_cond_func(file, NULL, 0);
in_range_check_pushed_down= FALSE;
- mi_set_rowid_filter_func(file, NULL, NULL, 0);
+ mi_set_rowid_filter_func(file, NULL, 0);
ds_mrr.dsmrr_close();
#if !defined(DBUG_OFF) && defined(SQL_SELECT_FIXED_FOR_UPDATE)
file->update&= ~HA_STATE_AKTIV; // Forget active row
@@ -2010,9 +2019,8 @@ int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
end_range= NULL;
if (index == pushed_idx_cond_keyno)
mi_set_index_cond_func(file, handler_index_cond_check, this);
- if (pushed_rowid_filter)
- mi_set_rowid_filter_func(file, handler_rowid_filter_check,
- handler_rowid_filter_is_active, this);
+ if (pushed_rowid_filter && handler_rowid_filter_is_active(this))
+ mi_set_rowid_filter_func(file, handler_rowid_filter_check, this);
res= mi_rkey(file, buf, index, key, keypart_map, find_flag);
mi_set_index_cond_func(file, NULL, 0);
return res;
@@ -2585,6 +2593,22 @@ static int myisam_drop_table(handlerton *hton, const char *path)
return mi_delete_table(path);
}
+
+void myisam_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ /*
+    MyISAM row lookups are slow as the row data is not cached.
+    The following numbers were found by check_costs.pl when using 1M rows
+    with all rows cached. See optimizer_costs.txt.
+ */
+ costs->row_next_find_cost= 0.000063539;
+ costs->row_lookup_cost= 0.001014818;
+ costs->key_next_find_cost= 0.000090585;
+ costs->key_lookup_cost= 0.000550142;
+ costs->key_copy_cost= 0.000015685;
+}
+
+
static int myisam_init(void *p)
{
handlerton *hton;
@@ -2604,6 +2628,7 @@ static int myisam_init(void *p)
hton->create= myisam_create_handler;
hton->drop_table= myisam_drop_table;
hton->panic= myisam_panic;
+ hton->update_optimizer_costs= myisam_update_optimizer_costs;
hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES;
hton->tablefile_extensions= ha_myisam_exts;
mi_killed= mi_killed_in_mariadb;
@@ -2643,7 +2668,8 @@ int ha_myisam::multi_range_read_next(range_id_t *range_info)
ha_rows ha_myisam::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost)
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost)
{
/*
This call is here because there is no location where this->table would
@@ -2652,7 +2678,7 @@ ha_rows ha_myisam::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
*/
ds_mrr.init(this, table);
return ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges, bufsz,
- flags, cost);
+ flags, limit, cost);
}
ha_rows ha_myisam::multi_range_read_info(uint keyno, uint n_ranges, uint keys,
@@ -2707,12 +2733,23 @@ Item *ha_myisam::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
bool ha_myisam::rowid_filter_push(Rowid_filter* rowid_filter)
{
+ /* This will be used in index_init() */
pushed_rowid_filter= rowid_filter;
- mi_set_rowid_filter_func(file, handler_rowid_filter_check,
- handler_rowid_filter_is_active, this);
return false;
}
+
+/* Enable / disable the rowid filter depending on whether it is active */
+
+void ha_myisam::rowid_filter_changed()
+{
+ if (pushed_rowid_filter && handler_rowid_filter_is_active(this))
+ mi_set_rowid_filter_func(file, handler_rowid_filter_check, this);
+ else
+ mi_set_rowid_filter_func(file, NULL, this);
+}
+
+
struct st_mysql_storage_engine myisam_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
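myisam_update_optimizer_costs() above feeds engine-specific per-operation cost constants to the optimizer through the new hton->update_optimizer_costs hook. The snippet below simply plugs those same constants into a back-of-the-envelope comparison between stepping through rows sequentially and fetching them through an index; the way the constants are combined here is illustrative and not the server's actual cost formula:

#include <stdio.h>

int main(void)
{
  /* Constants copied from myisam_update_optimizer_costs() in the patch. */
  double row_next_find_cost= 0.000063539;
  double row_lookup_cost=    0.001014818;
  double key_next_find_cost= 0.000090585;
  double key_lookup_cost=    0.000550142;
  double key_copy_cost=      0.000015685;

  double rows= 1e6;

  /* Rough comparison only: reading every row sequentially versus doing the
     same number of key lookups followed by row lookups. */
  double scan_cost=   rows * row_next_find_cost;
  double lookup_cost= rows * (key_lookup_cost + key_copy_cost + row_lookup_cost);

  printf("sequential next-row cost for %.0f rows: %.2f\n", rows, scan_cost);
  printf("keyed lookup cost for %.0f rows:        %.2f\n", rows, lookup_cost);
  (void) key_next_find_cost;                /* listed for completeness */
  return 0;
}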
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index c4c46a63afa..0914d531788 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -54,125 +54,132 @@ class ha_myisam final : public handler
public:
ha_myisam(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisam() = default;
- handler *clone(const char *name, MEM_ROOT *mem_root);
- const char *index_type(uint key_number);
- ulonglong table_flags() const { return int_table_flags; }
- int index_init(uint idx, bool sorted);
- int index_end();
- int rnd_end();
-
- ulong index_flags(uint inx, uint part, bool all_parts) const;
- uint max_supported_keys() const { return MI_MAX_KEY; }
- uint max_supported_key_parts() const { return HA_MAX_KEY_SEG; }
- uint max_supported_key_length() const { return HA_MAX_KEY_LENGTH; }
- uint max_supported_key_part_length() const { return HA_MAX_KEY_LENGTH; }
- void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(const uchar * buf);
- int update_row(const uchar * old_data, const uchar * new_data);
- int delete_row(const uchar * buf);
+ handler *clone(const char *name, MEM_ROOT *mem_root) override;
+ const char *index_type(uint key_number) override;
+ ulonglong table_flags() const override { return int_table_flags; }
+ int index_init(uint idx, bool sorted) override;
+ int index_end() override;
+ int rnd_end() override;
+
+ ulong index_flags(uint inx, uint part, bool all_parts) const override;
+ uint max_supported_keys() const override { return MI_MAX_KEY; }
+ uint max_supported_key_parts() const override { return HA_MAX_KEY_SEG; }
+ uint max_supported_key_length() const override { return HA_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() const override
+ { return HA_MAX_KEY_LENGTH; }
+ void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share) override;
+ int open(const char *name, int mode, uint test_if_locked) override;
+ int close(void) override;
+ int write_row(const uchar * buf) override;
+ int update_row(const uchar * old_data, const uchar * new_data) override;
+ int delete_row(const uchar * buf) override;
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
+ enum ha_rkey_function find_flag) override;
int index_read_idx_map(uchar *buf, uint index, const uchar *key,
key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_next(uchar * buf);
- int index_prev(uchar * buf);
- int index_first(uchar * buf);
- int index_last(uchar * buf);
- int index_next_same(uchar *buf, const uchar *key, uint keylen);
- int ft_init()
+ enum ha_rkey_function find_flag) override;
+ int index_next(uchar * buf) override;
+ int index_prev(uchar * buf) override;
+ int index_first(uchar * buf) override;
+ int index_last(uchar * buf) override;
+ int index_next_same(uchar *buf, const uchar *key, uint keylen) override;
+ int ft_init() override
{
if (!ft_handler)
return 1;
ft_handler->please->reinit_search(ft_handler);
return 0;
}
- FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
+ FT_INFO *ft_init_ext(uint flags, uint inx,String *key) override
{
return ft_init_search(flags,file,inx,
(uchar *)key->ptr(), key->length(), key->charset(),
table->record[0]);
}
- int ft_read(uchar *buf);
- int rnd_init(bool scan);
- int rnd_next(uchar *buf);
- int rnd_pos(uchar * buf, uchar *pos);
- int remember_rnd_pos();
- int restart_rnd_next(uchar *buf);
- void position(const uchar *record);
- int info(uint);
- int extra(enum ha_extra_function operation);
- int extra_opt(enum ha_extra_function operation, ulong cache_size);
- int reset(void);
- int external_lock(THD *thd, int lock_type);
- int delete_all_rows(void);
- int reset_auto_increment(ulonglong value);
- int disable_indexes(uint mode);
- int enable_indexes(uint mode);
- int indexes_are_disabled(void);
- void start_bulk_insert(ha_rows rows, uint flags);
- int end_bulk_insert();
+ int ft_read(uchar *buf) override;
+ int rnd_init(bool scan) override;
+ int rnd_next(uchar *buf) override;
+ int rnd_pos(uchar * buf, uchar *pos) override;
+ int remember_rnd_pos() override;
+ int restart_rnd_next(uchar *buf) override;
+ void position(const uchar *record) override;
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) override;
+ int info(uint) override;
+ int extra(enum ha_extra_function operation) override;
+ int extra_opt(enum ha_extra_function operation, ulong cache_size) override;
+ int reset(void) override;
+ int external_lock(THD *thd, int lock_type) override;
+ int delete_all_rows(void) override;
+ int reset_auto_increment(ulonglong value) override;
+ int disable_indexes(uint mode) override;
+ int enable_indexes(uint mode) override;
+ int indexes_are_disabled(void) override;
+ void start_bulk_insert(ha_rows rows, uint flags) override;
+ int end_bulk_insert() override;
ha_rows records_in_range(uint inx, const key_range *min_key,
- const key_range *max_key, page_range *pages);
- void update_create_info(HA_CREATE_INFO *create_info);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+ const key_range *max_key, page_range *pages) override;
+ void update_create_info(HA_CREATE_INFO *create_info) override;
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) override;
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- virtual void get_auto_increment(ulonglong offset, ulonglong increment,
- ulonglong nb_desired_values,
- ulonglong *first_value,
- ulonglong *nb_reserved_values);
- int rename_table(const char * from, const char * to);
- int delete_table(const char *name);
- int check_for_upgrade(HA_CHECK_OPT *check_opt);
- int check(THD* thd, HA_CHECK_OPT* check_opt);
- int analyze(THD* thd,HA_CHECK_OPT* check_opt);
- int repair(THD* thd, HA_CHECK_OPT* check_opt);
- bool check_and_repair(THD *thd);
- bool is_crashed() const;
- bool auto_repair(int error) const
+ enum thr_lock_type lock_type) override;
+ void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values) override;
+ int rename_table(const char * from, const char * to) override;
+ int delete_table(const char *name) override;
+ int check_for_upgrade(HA_CHECK_OPT *check_opt) override;
+ int check(THD* thd, HA_CHECK_OPT* check_opt) override;
+ int analyze(THD* thd,HA_CHECK_OPT* check_opt) override;
+ int repair(THD* thd, HA_CHECK_OPT* check_opt) override;
+ bool check_and_repair(THD *thd) override;
+ bool is_crashed() const override;
+ bool auto_repair(int error) const override
{
return (myisam_recover_options != HA_RECOVER_OFF &&
error == HA_ERR_CRASHED_ON_USAGE);
}
- int optimize(THD* thd, HA_CHECK_OPT* check_opt);
- int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
- int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
+ int optimize(THD* thd, HA_CHECK_OPT* check_opt) override;
+ int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt) override;
+ int preload_keys(THD* thd, HA_CHECK_OPT* check_opt) override;
enum_alter_inplace_result check_if_supported_inplace_alter(TABLE *new_table,
- Alter_inplace_info *alter_info);
- bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
+ Alter_inplace_info *alter_info)
+ override;
+ bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes)
+ override;
#ifdef HAVE_QUERY_CACHE
my_bool register_query_cache_table(THD *thd, const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
- ulonglong *engine_data);
+ ulonglong *engine_data) override;
#endif
- MI_INFO *file_ptr(void)
- {
- return file;
- }
-public:
/**
* Multi Range Read interface
*/
int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
- uint n_ranges, uint mode, HANDLER_BUFFER *buf);
- int multi_range_read_next(range_id_t *range_info);
+ uint n_ranges, uint mode, HANDLER_BUFFER *buf) override;
+ int multi_range_read_next(range_id_t *range_info) override;
ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost);
+ uint *flags, ha_rows limit,
+ Cost_estimate *cost) override;
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
- uint *flags, Cost_estimate *cost);
- int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size);
+ uint *flags, Cost_estimate *cost) override;
+ int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size) override;
/* Index condition pushdown implementation */
- Item *idx_cond_push(uint keyno, Item* idx_cond);
- bool rowid_filter_push(Rowid_filter* rowid_filter);
+ Item *idx_cond_push(uint keyno, Item* idx_cond) override;
+ bool rowid_filter_push(Rowid_filter* rowid_filter) override;
+ void rowid_filter_changed() override;
+
+ /* Used by myisammrg */
+ MI_INFO *file_ptr(void)
+ {
+ return file;
+ }
private:
DsMrr_impl ds_mrr;
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index 66238745a04..e7e64edd926 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -376,16 +376,16 @@ void mi_set_index_cond_func(MI_INFO *info, index_cond_func_t func,
{
info->index_cond_func= func;
info->index_cond_func_arg= func_arg;
+ info->has_cond_pushdown= (info->index_cond_func || info->rowid_filter_func);
}
void mi_set_rowid_filter_func(MI_INFO *info,
rowid_filter_func_t check_func,
- rowid_filter_is_active_func_t is_active_func,
void *func_arg)
{
info->rowid_filter_func= check_func;
- info->rowid_filter_is_active_func= is_active_func;
info->rowid_filter_func_arg= func_arg;
+ info->has_cond_pushdown= (info->index_cond_func || info->rowid_filter_func);
}
/*
diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c
index 087eb59c7c0..bde3ee19e2c 100644
--- a/storage/myisam/mi_key.c
+++ b/storage/myisam/mi_key.c
@@ -510,14 +510,6 @@ int mi_unpack_index_tuple(MI_INFO *info, uint keynr, uchar *record)
}
-static int mi_check_rowid_filter_is_active(MI_INFO *info)
-{
- if (info->rowid_filter_is_active_func == NULL)
- return 0;
- return info->rowid_filter_is_active_func(info->rowid_filter_func_arg);
-}
-
-
/*
Check the current index tuple: Check ICP condition and/or Rowid Filter
@@ -532,21 +524,23 @@ static int mi_check_rowid_filter_is_active(MI_INFO *info)
Check result according to check_result_t definition
*/
-check_result_t mi_check_index_tuple(MI_INFO *info, uint keynr, uchar *record)
+check_result_t mi_check_index_tuple_real(MI_INFO *info, uint keynr, uchar *record)
{
- int need_unpack= TRUE;
check_result_t res= CHECK_POS;
+ DBUG_ASSERT(info->index_cond_func || info->rowid_filter_func);
+
+ if (mi_unpack_index_tuple(info, keynr, record))
+ return CHECK_ERROR;
if (info->index_cond_func)
{
- if (mi_unpack_index_tuple(info, keynr, record))
- res= CHECK_ERROR;
- else if ((res= info->index_cond_func(info->index_cond_func_arg)) ==
- CHECK_OUT_OF_RANGE)
+ if ((res= info->index_cond_func(info->index_cond_func_arg)) ==
+ CHECK_OUT_OF_RANGE)
{
/* We got beyond the end of scanned range */
info->lastpos= HA_OFFSET_ERROR; /* No active record */
my_errno= HA_ERR_END_OF_FILE;
+ return res;
}
/*
@@ -555,25 +549,17 @@ check_result_t mi_check_index_tuple(MI_INFO *info, uint keynr, uchar *record)
*/
if (res != CHECK_POS)
return res;
-
- need_unpack= FALSE;
}
/* Check the Rowid Filter, if present */
- if (mi_check_rowid_filter_is_active(info))
+ if (info->rowid_filter_func)
{
- /* Unpack the index tuple if we haven't done it already */
- if (need_unpack && mi_unpack_index_tuple(info, keynr, record))
- res= CHECK_ERROR;
- else
+ if ((res= info->rowid_filter_func(info->rowid_filter_func_arg)) ==
+ CHECK_OUT_OF_RANGE)
{
- if ((res= info->rowid_filter_func(info->rowid_filter_func_arg)) ==
- CHECK_OUT_OF_RANGE)
- {
- /* We got beyond the end of scanned range */
- info->lastpos= HA_OFFSET_ERROR; /* No active record */
- my_errno= HA_ERR_END_OF_FILE;
- }
+ /* We got beyond the end of scanned range */
+ info->lastpos= HA_OFFSET_ERROR; /* No active record */
+ my_errno= HA_ERR_END_OF_FILE;
}
}
return res;
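The rewritten mi_check_index_tuple_real() unpacks the index tuple once up front and then runs the two pushed-down checks in order: the ICP condition first, then the rowid filter, with CHECK_OUT_OF_RANGE from either check ending the scan (lastpos cleared, my_errno set to HA_ERR_END_OF_FILE). A compact standalone sketch of that two-stage flow, using a local enum and stand-in check functions rather than the MyISAM ones:

#include <stdio.h>

typedef enum
{
  CHECK_ERROR= -1, CHECK_NEG= 0, CHECK_POS= 1, CHECK_OUT_OF_RANGE= 2
} check_result_t;

/* Stand-ins for the pushed-down ICP condition and the rowid filter. */
static check_result_t icp_check(int key)     /* accept keys below 8 */
{ return key < 8 ? CHECK_POS : CHECK_OUT_OF_RANGE; }

static check_result_t rowid_filter(int key)  /* keep even keys only */
{ return (key % 2) == 0 ? CHECK_POS : CHECK_NEG; }

/* Same shape as the patched function: ICP first, filter second, and any
   non-CHECK_POS result from the ICP check is returned immediately. */
static check_result_t check_index_tuple(int key)
{
  check_result_t res;
  if ((res= icp_check(key)) != CHECK_POS)
    return res;                         /* CHECK_OUT_OF_RANGE ends the scan */
  return rowid_filter(key);
}

int main(void)
{
  for (int key= 5; key <= 9; key++)
  {
    check_result_t r= check_index_tuple(key);
    printf("key %d -> %s\n", key,
           r == CHECK_POS ? "accept" :
           r == CHECK_NEG ? "skip" : "out of range, stop");
    if (r == CHECK_OUT_OF_RANGE)
      break;
  }
  return 0;
}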
diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index bf6f3ef852c..590981fb790 100644
--- a/storage/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
@@ -119,7 +119,7 @@ int mi_rkey(MI_INFO *info, uchar *buf, int inx, const uchar *key,
while ((info->lastpos >= info->state->data_file_length &&
(search_flag != HA_READ_KEY_EXACT ||
last_used_keyseg != keyinfo->seg + keyinfo->keysegs)) ||
- (res= mi_check_index_tuple(info, inx, buf)) == CHECK_NEG)
+ (res= mi_check_index_tuple(info, inx, buf)) == CHECK_NEG)
{
uint not_used[2];
/*
diff --git a/storage/myisam/mi_scan.c b/storage/myisam/mi_scan.c
index 8d436c4eada..24aca8e8751 100644
--- a/storage/myisam/mi_scan.c
+++ b/storage/myisam/mi_scan.c
@@ -39,8 +39,10 @@ int mi_scan_init(register MI_INFO *info)
int mi_scan(MI_INFO *info, uchar *buf)
{
+ int tmp;
DBUG_ENTER("mi_scan");
/* Init all but update-flag */
info->update&= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);
- DBUG_RETURN ((*info->s->read_rnd)(info,buf,info->nextpos,1));
+ tmp= (*info->s->read_rnd)(info,buf,info->nextpos,1);
+ DBUG_RETURN(tmp);
}
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index a5777527e54..17d2eef898a 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -15,12 +15,14 @@
/* Describe, check and repair of MyISAM tables */
+#define VER "2.7"
#include "fulltext.h"
#include "my_default.h"
#include <m_ctype.h>
#include <stdarg.h>
#include <my_getopt.h>
#include <my_bit.h>
+#include <welcome_copyright_notice.h>
static uint decode_bits;
static char **default_argv;
@@ -53,7 +55,6 @@ static const char *field_pack[]=
static const char *myisam_stats_method_str="nulls_unequal";
static void get_options(int *argc,char * * *argv);
-static void print_version(void);
static void usage(void);
static int myisamchk(HA_CHECK *param, char *filename);
static void descript(HA_CHECK *param, register MI_INFO *info, char * name);
@@ -331,13 +332,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver 2.7 for %s at %s\n", my_progname, SYSTEM_TYPE,
- MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index c90d989c975..5ede6a6159c 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -304,10 +304,10 @@ struct st_myisam_info
/* If info->buff has to be reread for rnext */
my_bool buff_used;
my_bool create_unique_index_by_sort;
+ my_bool has_cond_pushdown;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
rowid_filter_func_t rowid_filter_func; /* rowid filter check function */
- rowid_filter_is_active_func_t rowid_filter_is_active_func; /* is activefunction */
void *rowid_filter_func_arg; /* parameter for the func */
THR_LOCK_DATA lock;
uchar *rtree_recursion_state; /* For RTREE */
@@ -742,7 +742,15 @@ my_bool mi_dynmap_file(MI_INFO *info, my_off_t size);
int mi_munmap_file(MI_INFO *info);
void mi_remap_file(MI_INFO *info, my_off_t size);
-check_result_t mi_check_index_tuple(MI_INFO *info, uint keynr, uchar *record);
+check_result_t mi_check_index_tuple_real(MI_INFO *info, uint keynr,
+ uchar *record);
+static inline check_result_t mi_check_index_tuple(MI_INFO *info, uint keynr,
+ uchar *record)
+{
+ if (!info->has_cond_pushdown && ! info->rowid_filter_func)
+ return CHECK_POS;
+ return mi_check_index_tuple_real(info, keynr, record);
+}
/* Functions needed by mi_check */
int killed_ptr(HA_CHECK *param);
@@ -754,7 +762,6 @@ extern void mi_set_index_cond_func(MI_INFO *info, index_cond_func_t check_func,
void *func_arg);
extern void mi_set_rowid_filter_func(MI_INFO *info,
rowid_filter_func_t check_func,
- rowid_filter_is_active_func_t is_active_func,
void *func_arg);
int flush_blocks(HA_CHECK *param, KEY_CACHE *key_cache, File file,
ulonglong *dirty_part_map);
diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c
index 40d473dc532..4e51af4ac3d 100644
--- a/storage/myisam/myisamlog.c
+++ b/storage/myisam/myisamlog.c
@@ -20,12 +20,14 @@
#define USE_MY_FUNC
#endif
+#define VER "1.4"
#include "myisamdef.h"
#include <my_tree.h>
#include <stdarg.h>
#ifdef HAVE_GETRUSAGE
#include <sys/resource.h>
#endif
+#include <welcome_copyright_notice.h>
#define FILENAME(A) (A ? A->show_name : "Unknown")
@@ -249,8 +251,7 @@ static void get_options(register int *argc, register char ***argv)
/* Fall through */
case 'I':
case '?':
- printf("%s Ver 1.4 for %s at %s\n",my_progname,SYSTEM_TYPE,
- MACHINE_TYPE);
+ print_version();
puts("By Monty, for your professional use\n");
if (version)
break;
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index d6cd9334a55..709530d915c 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -20,6 +20,7 @@
#define USE_MY_FUNC /* We need at least my_malloc */
#endif
+#define VER "1.23"
#include "myisamdef.h"
#include "my_default.h"
#include <queues.h>
@@ -30,6 +31,7 @@
#endif
#include <my_getopt.h>
#include <assert.h>
+#include <welcome_copyright_notice.h>
#if SIZEOF_LONG_LONG > 4
#define BITS_SAVED 64
@@ -289,13 +291,6 @@ static struct my_option my_long_options[] =
};
-static void print_version(void)
-{
- printf("%s Ver 1.23 for %s on %s\n",
- my_progname, SYSTEM_TYPE, MACHINE_TYPE);
-}
-
-
static void usage(void)
{
print_version();
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index d37636abab7..ee5d44b5d26 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -339,6 +339,33 @@ static void myrg_set_external_ref(MYRG_INFO *m_info, void *ext_ref_arg)
}
}
+IO_AND_CPU_COST ha_myisammrg::rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST cost= handler::rnd_pos_time(rows);
+ /*
+    Row data is not cached. costs.row_lookup_cost includes the cost of
+    reading the row from the system (probably cached by the OS).
+ */
+ cost.io= 0;
+ return cost;
+}
+
+IO_AND_CPU_COST ha_myisammrg::keyread_time(uint index, ulong ranges,
+ ha_rows rows,
+ ulonglong blocks)
+{
+ IO_AND_CPU_COST cost= handler::keyread_time(index, ranges, rows, blocks);
+ if (!blocks)
+ {
+ cost.io*= file->tables;
+ cost.cpu*= file->tables;
+ }
+ /* Add the cost of having to do a key lookup in all trees */
+ if (file->tables)
+ cost.cpu+= (file->tables-1) * (ranges * KEY_LOOKUP_COST);
+ return cost;
+}
+
/**
Open a MERGE parent table, but not its children.
@@ -1744,6 +1771,12 @@ int myisammrg_panic(handlerton *hton, ha_panic_function flag)
return myrg_panic(flag);
}
+static void myisammrg_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ myisam_update_optimizer_costs(costs);
+}
+
+
static int myisammrg_init(void *p)
{
handlerton *myisammrg_hton;
@@ -1759,7 +1792,7 @@ static int myisammrg_init(void *p)
myisammrg_hton->panic= myisammrg_panic;
myisammrg_hton->flags= HTON_NO_PARTITION;
myisammrg_hton->tablefile_extensions= ha_myisammrg_exts;
-
+ myisammrg_hton->update_optimizer_costs= myisammrg_update_optimizer_costs;
return 0;
}
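For a MERGE table the new keyread_time() scales the base estimate by the number of underlying MyISAM tables (when no block count is supplied) and charges one extra key lookup per range for every additional table, since each child index tree has to be probed. The small program below redoes that arithmetic with made-up inputs; the per-lookup constant is a placeholder, not the real KEY_LOOKUP_COST value:

#include <stdio.h>

int main(void)
{
  double base_io= 2.0, base_cpu= 0.5;  /* pretend handler::keyread_time() result */
  unsigned tables= 4;                  /* children in the MERGE table            */
  unsigned ranges= 10;
  double key_lookup_cost= 0.0005;      /* placeholder for KEY_LOOKUP_COST        */

  /* Mirrors the patch: with no block count, both cost parts are scaled by
     the number of child tables ... */
  double io=  base_io  * tables;
  double cpu= base_cpu * tables;

  /* ... and every range pays for a key lookup in each additional tree. */
  cpu+= (tables - 1) * (ranges * key_lookup_cost);

  printf("merge keyread cost: io=%.3f cpu=%.4f\n", io, cpu);
  return 0;
}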
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index 6da327ec84b..0435f7d6bd6 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -82,8 +82,8 @@ public:
ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisammrg();
- const char *index_type(uint key_number);
- ulonglong table_flags() const
+ const char *index_type(uint key_number) override;
+ ulonglong table_flags() const override
{
return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_NO_TRANSACTIONS |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
@@ -93,70 +93,81 @@ public:
HA_NO_COPY_ON_ALTER |
HA_DUPLICATE_POS | HA_CAN_MULTISTEP_MERGE);
}
- ulong index_flags(uint inx, uint part, bool all_parts) const
+ ulong index_flags(uint inx, uint part, bool all_parts) const override
{
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}
- uint max_supported_keys() const { return MI_MAX_KEY; }
- uint max_supported_key_length() const { return HA_MAX_KEY_LENGTH; }
- uint max_supported_key_part_length() const { return HA_MAX_KEY_LENGTH; }
- double scan_time()
- { return ulonglong2double(stats.data_file_length) / IO_SIZE + file->tables; }
-
- int open(const char *name, int mode, uint test_if_locked);
- int add_children_list(void);
- int attach_children(void);
- int detach_children(void);
- virtual handler *clone(const char *name, MEM_ROOT *mem_root);
- int close(void);
- int write_row(const uchar * buf);
- int update_row(const uchar * old_data, const uchar * new_data);
- int delete_row(const uchar * buf);
+ uint max_supported_keys() const override { return MI_MAX_KEY; }
+ uint max_supported_key_length() const override { return HA_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() const override
+ { return HA_MAX_KEY_LENGTH; }
+ IO_AND_CPU_COST scan_time() override
+ {
+ IO_AND_CPU_COST cost;
+ cost.io= (ulonglong2double(stats.data_file_length) / IO_SIZE +
+ file->tables),
+ cost.cpu= records() * ROW_NEXT_FIND_COST;
+ return cost;
+ }
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows) override;
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks) override;
+ int open(const char *name, int mode, uint test_if_locked) override;
+ handler *clone(const char *name, MEM_ROOT *mem_root) override;
+ int close(void) override;
+ int write_row(const uchar * buf) override;
+ int update_row(const uchar * old_data, const uchar * new_data) override;
+ int delete_row(const uchar * buf) override;
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
+ enum ha_rkey_function find_flag) override;
int index_read_idx_map(uchar *buf, uint index, const uchar *key,
key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map);
- int index_next(uchar * buf);
- int index_prev(uchar * buf);
- int index_first(uchar * buf);
- int index_last(uchar * buf);
- int index_next_same(uchar *buf, const uchar *key, uint keylen);
- int rnd_init(bool scan);
- int rnd_next(uchar *buf);
- int rnd_pos(uchar * buf, uchar *pos);
- void position(const uchar *record);
+ enum ha_rkey_function find_flag) override;
+ int index_read_last_map(uchar *buf, const uchar *key, key_part_map keypart_map) override;
+ int index_next(uchar * buf) override;
+ int index_prev(uchar * buf) override;
+ int index_first(uchar * buf) override;
+ int index_last(uchar * buf) override;
+ int index_next_same(uchar *buf, const uchar *key, uint keylen) override;
+ int rnd_init(bool scan) override;
+ int rnd_next(uchar *buf) override;
+ int rnd_pos(uchar * buf, uchar *pos) override;
+ void position(const uchar *record) override;
ha_rows records_in_range(uint inx, const key_range *start_key,
- const key_range *end_key, page_range *pages);
- int delete_all_rows();
- int info(uint);
- int reset(void);
- int extra(enum ha_extra_function operation);
- int extra_opt(enum ha_extra_function operation, ulong cache_size);
- int external_lock(THD *thd, int lock_type);
- uint lock_count(void) const;
- int create_mrg(const char *name, HA_CREATE_INFO *create_info);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+ const key_range *end_key, page_range *pages) override;
+ int delete_all_rows() override;
+ int info(uint) override;
+ int reset(void) override;
+ int extra(enum ha_extra_function operation) override;
+ int extra_opt(enum ha_extra_function operation, ulong cache_size) override;
+ int external_lock(THD *thd, int lock_type) override;
+ uint lock_count(void) const override;
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) override;
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- void update_create_info(HA_CREATE_INFO *create_info);
- void append_create_info(String *packet);
- MYRG_INFO *myrg_info() { return file; }
- TABLE *table_ptr() { return table; }
+ enum thr_lock_type lock_type) override;
+ void update_create_info(HA_CREATE_INFO *create_info) override;
+ void append_create_info(String *packet) override;
enum_alter_inplace_result check_if_supported_inplace_alter(TABLE *,
- Alter_inplace_info *);
+ Alter_inplace_info *) override;
bool inplace_alter_table(TABLE *altered_table,
- Alter_inplace_info *ha_alter_info);
- int check(THD* thd, HA_CHECK_OPT* check_opt);
- ha_rows records();
- virtual uint count_query_cache_dependant_tables(uint8 *tables_type);
+ Alter_inplace_info *ha_alter_info) override;
+ int check(THD* thd, HA_CHECK_OPT* check_opt) override;
+ ha_rows records() override;
+ virtual uint count_query_cache_dependant_tables(uint8 *tables_type) override;
virtual my_bool
register_query_cache_dependant_tables(THD *thd,
Query_cache *cache,
Query_cache_block_table **block,
- uint *n);
- virtual void set_lock_type(enum thr_lock_type lock);
+ uint *n) override;
+ virtual void set_lock_type(enum thr_lock_type lock) override;
+
+ /* Internal interface functions, not part of the normal handler interface */
+ int add_children_list(void);
+ int attach_children(void);
+ int detach_children(void);
+ int create_mrg(const char *name, HA_CREATE_INFO *create_info);
+ MYRG_INFO *myrg_info() { return file; }
+ TABLE *table_ptr() { return table; }
};
diff --git a/storage/oqgraph/ha_oqgraph.h b/storage/oqgraph/ha_oqgraph.h
index c8e175df616..d1f5a898ad7 100644
--- a/storage/oqgraph/ha_oqgraph.h
+++ b/storage/oqgraph/ha_oqgraph.h
@@ -74,9 +74,10 @@ public:
const char **bas_ext() const;
uint max_supported_keys() const { return MAX_KEY; }
uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
- double scan_time() { return (double) 1000000000; }
- double read_time(uint index, uint ranges, ha_rows rows)
- { return 1; }
+ IO_AND_CPU_COST scan_time()
+ { return { (double) 1000000000, (double) 1000000000 }; }
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows)
+ { return { (double) rows, (double) rows }; }
// Doesn't make sense to change the engine on a virtual table.
virtual bool can_switch_engines() { return false; }
diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h
index eab97434265..20ed7448a1e 100644
--- a/storage/perfschema/ha_perfschema.h
+++ b/storage/perfschema/ha_perfschema.h
@@ -104,8 +104,10 @@ public:
ha_rows estimate_rows_upper_bound(void)
{ return HA_POS_ERROR; }
- double scan_time(void)
- { return 1.0; }
+ IO_AND_CPU_COST scan_time(void)
+ {
+ return {0.0, 1.0};
+ }
/**
Open a performance schema table.
diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt
index d3f7ca90889..544ae62e6e2 100644
--- a/storage/rocksdb/CMakeLists.txt
+++ b/storage/rocksdb/CMakeLists.txt
@@ -155,6 +155,8 @@ IF(NOT TARGET rocksdb)
RETURN()
ENDIF()
+INSTALL_MANPAGES(rocksdb-engine mariadb-ldb.1 myrocks_hotbackup.1)
+
CHECK_CXX_SOURCE_COMPILES("
#if defined(_MSC_VER) && !defined(__thread)
#define __thread __declspec(thread)
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index 8067d6f6b93..86300f1cf71 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -5235,6 +5235,24 @@ static int rocksdb_check_version(handlerton *hton,
return (create_id == ver);
}
+
+/*
+  Set up cost factors for RocksDB to be able to approximate how many
+  ms different operations take. See the cost functions in handler.h for how
+  the different variables are used.
+*/
+
+static void rocksdb_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ /* See optimizer_costs.txt for how these are calculated */
+ costs->row_next_find_cost= 0.00015161;
+ costs->row_lookup_cost= 0.00150453;
+ costs->key_next_find_cost= 0.00025108;
+ costs->key_lookup_cost= 0.00079369;
+ costs->row_copy_cost= 0.00006087;
+}
+
+
/*
Storage Engine initialization function, invoked when plugin is loaded.
*/
@@ -5343,6 +5361,7 @@ static int rocksdb_init_func(void *const p) {
rocksdb_hton->savepoint_rollback = rocksdb_rollback_to_savepoint;
rocksdb_hton->savepoint_rollback_can_release_mdl =
rocksdb_rollback_to_savepoint_can_release_mdl;
+ rocksdb_hton->update_optimizer_costs= rocksdb_update_optimizer_costs;
#ifdef MARIAROCKS_NOT_YET
rocksdb_hton->update_table_stats = rocksdb_update_table_stats;
#endif // MARIAROCKS_NOT_YET
@@ -14631,17 +14650,25 @@ bool ha_rocksdb::use_read_free_rpl() const {
}
#endif // MARIAROCKS_NOT_YET
-double ha_rocksdb::read_time(uint index, uint ranges, ha_rows rows) {
+IO_AND_CPU_COST ha_rocksdb::keyread_time(uint index, ulong ranges,
+ ha_rows rows,
+ ulonglong blocks) {
DBUG_ENTER_FUNC();
+ IO_AND_CPU_COST cost;
+ cost= handler::keyread_time(index, ranges, rows, blocks);
+ cost.io/= 4; // Assume 75% compression (75% less IO)
+ DBUG_RETURN(cost);
+}
- if (index != table->s->primary_key) {
- /* Non covering index range scan */
- DBUG_RETURN(handler::read_time(index, ranges, rows));
- }
- DBUG_RETURN((rows / 20.0) + 1);
+ulonglong ha_rocksdb::index_blocks(uint index, uint ranges, ha_rows rows)
+{
+ size_t len= table->key_storage_length(index);
+ ulonglong blocks= (rows * len / 4) / stats.block_size + ranges; // 75 % compression
+ return blocks * stats.block_size / IO_SIZE;
}
+
void ha_rocksdb::print_error(int error, myf errflag) {
if (error == HA_ERR_ROCKSDB_STATUS_BUSY) {
error = HA_ERR_LOCK_DEADLOCK;
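ha_rocksdb::index_blocks() above estimates how many IO_SIZE units a range read touches by assuming the stored keys compress to a quarter of their uncompressed size (the same 75% figure used in keyread_time(), where cost.io is divided by 4). The worked example below redoes that arithmetic with invented inputs; key length, block size and the IO_SIZE value of 4096 are assumptions for the example rather than values read from a server:

#include <stdio.h>

int main(void)
{
  unsigned long long rows= 100000;        /* rows expected in the ranges  */
  unsigned long long len= 32;             /* assumed key storage length   */
  unsigned long long block_size= 16384;   /* assumed block size           */
  unsigned long long ranges= 4;
  unsigned long long io_size= 4096;       /* assumed IO_SIZE              */

  /* Same formula as the patch: keys shrink to 25% of their size (75%
     compression), plus one block per range, expressed in IO_SIZE units. */
  unsigned long long blocks= (rows * len / 4) / block_size + ranges;
  unsigned long long io_units= blocks * block_size / io_size;

  printf("estimated index blocks: %llu (%llu IO_SIZE units)\n",
         blocks, io_units);
  return 0;
}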
diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h
index f847ee25cb8..d03c183873e 100644
--- a/storage/rocksdb/ha_rocksdb.h
+++ b/storage/rocksdb/ha_rocksdb.h
@@ -623,15 +623,19 @@ public:
bool sorted) override
MY_ATTRIBUTE((__warn_unused_result__));
- virtual double scan_time() override {
+ IO_AND_CPU_COST scan_time() override
+ {
+ IO_AND_CPU_COST cost;
DBUG_ENTER_FUNC();
-
- DBUG_RETURN(
- static_cast<double>((stats.records + stats.deleted) / 20.0 + 10));
+ cost= handler::scan_time();
+ cost.cpu+= stats.deleted * ROW_NEXT_FIND_COST; // We have to skip over deleted rows
+ DBUG_RETURN(cost);
}
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges,
+ ha_rows rows, ulonglong blocks) override;
- virtual double read_time(uint, uint, ha_rows rows) override;
- virtual void print_error(int error, myf errflag) override;
+ ulonglong index_blocks(uint index, uint ranges, ha_rows rows) override;
+ void print_error(int error, myf errflag) override;
int open(const char *const name, int mode, uint test_if_locked) override
MY_ATTRIBUTE((__warn_unused_result__));
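The scan_time() change in this header starts from the generic handler estimate and adds one ROW_NEXT_FIND_COST-sized CPU charge for every deleted-but-not-yet-compacted row, since a RocksDB scan still has to step over those tombstones. A quick illustration of the adjustment, with a placeholder value for the constant:

#include <stdio.h>

int main(void)
{
  double base_cpu= 5.0;                /* pretend handler::scan_time() CPU part */
  unsigned long long deleted= 200000;  /* tombstones the scan must skip         */
  double row_next_find_cost= 0.0001;   /* placeholder for ROW_NEXT_FIND_COST    */

  double cpu= base_cpu + (double) deleted * row_next_find_cost;
  printf("scan cpu cost including tombstones: %.2f\n", cpu);
  return 0;
}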
diff --git a/man/mysql_ldb.1 b/storage/rocksdb/mariadb-ldb.1
index e1c08bba995..e1c08bba995 100644
--- a/man/mysql_ldb.1
+++ b/storage/rocksdb/mariadb-ldb.1
diff --git a/man/myrocks_hotbackup.1 b/storage/rocksdb/myrocks_hotbackup.1
index 4237c452f76..4237c452f76 100644
--- a/man/myrocks_hotbackup.1
+++ b/storage/rocksdb/myrocks_hotbackup.1
diff --git a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc
index c76b52d4cc1..bf593ec9b0c 100644
--- a/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc
+++ b/storage/rocksdb/mysql-test/rocksdb/include/rocksdb_icp.inc
@@ -49,7 +49,7 @@ insert into t3 select a,a/10,a,a from t1;
explain
select * from t3 where kp1=3 and kp2 like '%foo%';
---replace_column 9 #
+--source include/explain-no-costs.inc
explain format=json
select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%';
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result
index 6bd6cea97de..a14ffdec2e3 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result
@@ -92,7 +92,5 @@ disconnect con2;
disconnect con1;
disconnect con0;
SELECT * FROM t1 ORDER BY pk INTO OUTFILE <output_file>;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
All pk values matched their expected values
DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
index c4a1c5f4668..1f4d1a641a2 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
@@ -20,8 +20,6 @@ END IF;
SET id1_cond = id1_cond + 1;
END WHILE;
END//
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
"Skipping bloom filter"
SET session rocksdb_skip_bloom_filter_on_read=1;
CALL select_test();
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result
index df4c8ee424c..d2974438ecb 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result
@@ -98,12 +98,8 @@ buffer_LRU_unzip_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL N
buffer_LRU_unzip_search_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_member Page scanned per single LRU unzip search
buffer_page_read_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Leaf Pages read
buffer_page_read_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Non-leaf Pages read
-buffer_page_read_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Leaf Pages read
-buffer_page_read_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Non-Leaf Pages read
buffer_page_read_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Undo Log Pages read
buffer_page_read_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Inode Pages read
-buffer_page_read_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Free List Pages read
-buffer_page_read_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Bitmap Pages read
buffer_page_read_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of System Pages read
buffer_page_read_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Transaction System Pages read
buffer_page_read_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of File Space Header Pages read
@@ -114,12 +110,8 @@ buffer_page_read_zblob2 buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NU
buffer_page_read_other buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of other/unknown (old version of InnoDB) Pages read
buffer_page_written_index_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Leaf Pages written
buffer_page_written_index_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Non-leaf Pages written
-buffer_page_written_index_ibuf_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Leaf Pages written
-buffer_page_written_index_ibuf_non_leaf buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Index Non-Leaf Pages written
buffer_page_written_undo_log buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Undo Log Pages written
buffer_page_written_index_inode buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Index Inode Pages written
-buffer_page_written_ibuf_free_list buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Free List Pages written
-buffer_page_written_ibuf_bitmap buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Insert Buffer Bitmap Pages written
buffer_page_written_system_page buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of System Pages written
buffer_page_written_trx_system buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Transaction System Pages written
buffer_page_written_fsp_hdr buffer_page_io 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of File Space Header Pages written
@@ -187,14 +179,6 @@ adaptive_hash_rows_removed adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL
adaptive_hash_rows_deleted_no_hash_entry adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of rows deleted that did not have corresponding Adaptive Hash Index entries
adaptive_hash_rows_updated adaptive_hash_index 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of Adaptive Hash Index rows updated
file_num_open_files file_system 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 value Number of files currently open (innodb_num_open_files)
-ibuf_merges_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of inserted records merged by change buffering
-ibuf_merges_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of deleted records merged by change buffering
-ibuf_merges_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of purge records merged by change buffering
-ibuf_merges_discard_insert change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of insert merged operations discarded
-ibuf_merges_discard_delete_mark change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of deleted merged operations discarded
-ibuf_merges_discard_delete change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of purge merged operations discarded
-ibuf_merges change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Number of change buffer merges
-ibuf_size change_buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Change buffer size in pages
innodb_master_thread_sleeps server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of times (seconds) master thread sleeps
innodb_activity_count server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Current server activity count
innodb_master_active_loops server 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of times master thread performs its tasks when server is active
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result
index 730e12d02f6..6645a33e356 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_port_fixes.result
@@ -39,8 +39,8 @@ a varchar(10) NOT NULL,
e int(11) DEFAULT 0,
KEY (a)
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
-insert into t1 values (1,1,1),(2,2,2);
-explain select a from t1 where a <'zzz';
+insert into t1 values (1,"a",1),(2,"b",2),(3,"c",2);
+explain select a from t1 where a <'b';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 32 NULL # Using where
CREATE TABLE t2(
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
index 6ea13872033..3a631d2925b 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
@@ -1,123 +1,63 @@
Warnings:
Note 1051 Unknown table 'test.ti_nk'
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
DROP TABLE ti_nk;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
index 989d28e773d..0c9d29efa28 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
@@ -278,12 +278,12 @@ j
1
4
EXPLAIN
-SELECT * FROM t10, t11 WHERE i=j;
+SELECT * FROM t11 straight_join t10 WHERE i=j;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t10 index PRIMARY PRIMARY 4 NULL # Using index
-1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t10.i # Using index
-SELECT * FROM t10, t11 WHERE i=j;
-i j
+1 SIMPLE t11 index PRIMARY PRIMARY 4 NULL # Using index
+1 SIMPLE t10 eq_ref PRIMARY PRIMARY 4 test.t11.j # Using index
+SELECT * FROM t11 straight_join t10 WHERE i=j;
+j i
1 1
DROP TABLE t10,t11;
#
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
index f9e3129c73f..a4717570450 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp.result
@@ -47,6 +47,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -56,7 +57,9 @@ EXPLAIN
"key": "kp1",
"key_length": "5",
"used_key_parts": ["kp1"],
+ "loops": 1,
"rows": 1000,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.kp1 between 2 and 4 and t3.kp1 MOD 3 = 0",
"attached_condition": "t3.kp2 like '%foo%'"
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
index 3634f8c023e..07bce244792 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_icp_rev.result
@@ -47,6 +47,7 @@ EXPLAIN
{
"query_block": {
"select_id": 1,
+ "cost": "COST_REPLACED",
"nested_loop": [
{
"table": {
@@ -56,7 +57,9 @@ EXPLAIN
"key": "kp1",
"key_length": "5",
"used_key_parts": ["kp1"],
+ "loops": 1,
"rows": 1000,
+ "cost": "COST_REPLACED",
"filtered": 100,
"index_condition": "t3.kp1 between 2 and 4 and t3.kp1 MOD 3 = 0",
"attached_condition": "t3.kp2 like '%foo%'"
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select.result b/storage/rocksdb/mysql-test/rocksdb/r/select.result
index 7ea43adc9ea..fc3825d5377 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/select.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/select.result
@@ -115,8 +115,6 @@ SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a
INTO OUTFILE '<DATADIR>/select.out'
CHARACTER SET utf8
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '''';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
200,'bar'
200,'bar'
100,'foobar'
@@ -128,12 +126,8 @@ INTO DUMPFILE '<DATADIR>/select.dump';
ERROR 42000: Result consisted of more than one row
SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1
INTO DUMPFILE '<DATADIR>/select.dump';
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
1z2200bar3
SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max;
-Warnings:
-Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT @min, @max;
@min @max
1 200
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result
index 3c9c30bb617..39413ea5987 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_char_indexes.result
@@ -45,7 +45,7 @@ t1 1 v16 1 v16 A 500 NULL NULL YES LSMTREE NO
INSERT INTO t1 (c,c20,v16,v128,pk) VALUES ('a','char1','varchar1a','varchar1b','1'),('a','char2','varchar2a','varchar2b','2'),('b','char3','varchar1a','varchar1b','3'),('c','char4','varchar3a','varchar3b','4');
EXPLAIN SELECT SUBSTRING(v16,0,3) FROM t1 WHERE v16 LIKE 'varchar%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v16 v16 19 NULL # Using where; Using index
+1 SIMPLE t1 index v16 v16 19 NULL # Using where; Using index
SELECT SUBSTRING(v16,7,3) FROM t1 WHERE v16 LIKE 'varchar%';
SUBSTRING(v16,7,3)
r1a
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result
index bd40e32f94d..5e89648648d 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_date_time_indexes.result
@@ -62,7 +62,7 @@ INSERT INTO t1 (d,dt,ts,t,y,pk) VALUES
(DATE(@tm),@tm,TIMESTAMP(@tm),TIME(@tm),YEAR(@tm),'12:05:00');
EXPLAIN SELECT ts FROM t1 WHERE ts > NOW();
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range ts ts 5 NULL # Using where; Using index
+1 SIMPLE t1 index ts ts 5 NULL # Using where; Using index
SELECT ts FROM t1 WHERE ts > NOW();
ts
EXPLAIN SELECT ts FROM t1 USE INDEX () WHERE ts > NOW();
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result
index b0bcfd7075c..011fa0894ec 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_enum_indexes.result
@@ -49,7 +49,7 @@ t1 0 PRIMARY 1 pk A 1000 NULL NULL LSMTREE NO
t1 1 b 1 b A 500 NULL NULL YES LSMTREE NO
EXPLAIN SELECT DISTINCT b FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL b 2 NULL #
+1 SIMPLE t1 ALL NULL NULL NULL NULL # Using temporary
SELECT DISTINCT b FROM t1;
b
test1
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result
index 89dc65e56f8..a98f90f28da 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_float_indexes.result
@@ -114,7 +114,7 @@ INSERT INTO t1 (f,r,d,dp,pk) VALUES
(4644,1422.22,466664.999,0.5,5);
EXPLAIN SELECT DISTINCT d FROM t1 ORDER BY d;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL d 9 NULL # Using index for group-by
+1 SIMPLE t1 index NULL d 9 NULL # Using index
SELECT DISTINCT d FROM t1 ORDER BY d;
d
-1
@@ -177,7 +177,7 @@ INSERT INTO t1 (f,r,d,dp,pk) VALUES
(1.2345,0,0,0,6);
EXPLAIN SELECT DISTINCT f FROM t1 ORDER BY f;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL f 5 NULL # Using index for group-by
+1 SIMPLE t1 index NULL f 5 NULL # Using index
SELECT DISTINCT f FROM t1 ORDER BY f;
f
-1
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test
index 99d4e2d117c..da4ac350654 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test
@@ -37,9 +37,9 @@ CREATE TABLE t1(
e int(11) DEFAULT 0,
KEY (a)
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
-insert into t1 values (1,1,1),(2,2,2);
+insert into t1 values (1,"a",1),(2,"b",2),(3,"c",2);
--replace_column 9 #
-explain select a from t1 where a <'zzz';
+explain select a from t1 where a <'b';
CREATE TABLE t2(
pk int,
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test
index f7de167bd96..9b24ad952d7 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/rocksdb.test
@@ -266,8 +266,8 @@ select * from t10;
select * from t11;
--replace_column 9 #
EXPLAIN
-SELECT * FROM t10, t11 WHERE i=j;
-SELECT * FROM t10, t11 WHERE i=j;
+SELECT * FROM t11 straight_join t10 WHERE i=j;
+SELECT * FROM t11 straight_join t10 WHERE i=j;
DROP TABLE t10,t11;
diff --git a/storage/rocksdb/tools/mysql_ldb.cc b/storage/rocksdb/tools/mysql_ldb.cc
index b1eec03f214..454b7a63c73 100644
--- a/storage/rocksdb/tools/mysql_ldb.cc
+++ b/storage/rocksdb/tools/mysql_ldb.cc
@@ -8,6 +8,7 @@
#include "rocksdb/ldb_tool.h"
int main(int argc, char **argv) {
+ MY_INIT(argv[0]);
rocksdb::Options db_options;
myrocks::Rdb_pk_comparator pk_comparator;
db_options.comparator = &pk_comparator;
diff --git a/storage/sequence/mysql-test/sequence/group_by.result b/storage/sequence/mysql-test/sequence/group_by.result
index bcda2ba5c76..7c098de9afd 100644
--- a/storage/sequence/mysql-test/sequence/group_by.result
+++ b/storage/sequence/mysql-test/sequence/group_by.result
@@ -86,7 +86,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL PRIMARY 8 NULL 8 Using index; Using join buffer (flat, BNL join)
explain select count(*) from seq_1_to_15_step_2 where seq > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE seq_1_to_15_step_2 index PRIMARY PRIMARY 8 NULL 8 Using where; Using index
+1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 8 Using where; Using index
explain select count(*) from seq_1_to_15_step_2 group by mod(seq,2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE seq_1_to_15_step_2 index NULL PRIMARY 8 NULL 8 Using index; Using temporary; Using filesort
diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc
index b2bce9325ac..6f66e122ed9 100644
--- a/storage/sequence/sequence.cc
+++ b/storage/sequence/sequence.cc
@@ -64,45 +64,53 @@ public:
Sequence_share *seqs;
ha_seq(handlerton *hton, TABLE_SHARE *table_arg)
: handler(hton, table_arg), seqs(0) { }
- ulonglong table_flags() const
+ ulonglong table_flags() const override
{ return HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE; }
/* open/close/locking */
int create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
+ HA_CREATE_INFO *create_info) override
{ return HA_ERR_WRONG_COMMAND; }
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int delete_table(const char *name)
+ int open(const char *name, int mode, uint test_if_locked) override;
+ int close(void) override;
+ int delete_table(const char *name) override
{
return 0;
}
- THR_LOCK_DATA **store_lock(THD *, THR_LOCK_DATA **, enum thr_lock_type);
+ THR_LOCK_DATA **store_lock(THD *, THR_LOCK_DATA **, enum thr_lock_type)
+ override;
/* table scan */
- int rnd_init(bool scan);
- int rnd_next(unsigned char *buf);
- void position(const uchar *record);
- int rnd_pos(uchar *buf, uchar *pos);
- int info(uint flag);
-
+ int rnd_init(bool scan) override;
+ int rnd_next(unsigned char *buf) override;
+ void position(const uchar *record) override;
+ int rnd_pos(uchar *buf, uchar *pos) override;
+ int info(uint flag) override;
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks) override
+ {
+ /* Avoids assert in total_cost() and makes DBUG_PRINT more consistent */
+ return {0,0};
+ }
+ IO_AND_CPU_COST scan_time() override
+ {
+ /* Avoids assert in total_cost() and makes DBUG_PRINT more consistent */
+ return {0, 0};
+ }
/* indexes */
- ulong index_flags(uint inx, uint part, bool all_parts) const
+ ulong index_flags(uint inx, uint part, bool all_parts) const override
{ return HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
HA_READ_RANGE | HA_KEYREAD_ONLY; }
- uint max_supported_keys() const { return 1; }
+ uint max_supported_keys() const override { return 1; }
int index_read_map(uchar *buf, const uchar *key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- int index_next(uchar *buf);
- int index_prev(uchar *buf);
- int index_first(uchar *buf);
- int index_last(uchar *buf);
+ enum ha_rkey_function find_flag) override;
+ int index_next(uchar *buf) override;
+ int index_prev(uchar *buf) override;
+ int index_first(uchar *buf) override;
+ int index_last(uchar *buf) override;
ha_rows records_in_range(uint inx, const key_range *start_key,
- const key_range *end_key, page_range *pages);
- double scan_time() { return (double)nvalues(); }
- double read_time(uint index, uint ranges, ha_rows rows) { return (double)rows; }
- double keyread_time(uint index, uint ranges, ha_rows rows) { return (double)rows; }
+ const key_range *end_key, page_range *pages) override;
private:
void set(uchar *buf);
@@ -492,6 +500,17 @@ int ha_seq_group_by_handler::next_row()
DBUG_RETURN(0);
}
+static void sequence_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+ costs->disk_read_cost= 0;
+ costs->disk_read_ratio= 0.0; // No disk
+ costs->key_next_find_cost=
+ costs->key_lookup_cost=
+ costs->key_copy_cost=
+ costs->row_next_find_cost=
+ costs->row_lookup_cost=
+ costs->row_copy_cost= 0.0000062391530550;
+}
/*****************************************************************************
Initialize the interface between the sequence engine and MariaDB
@@ -518,6 +537,7 @@ static int init(void *p)
hton->savepoint_set= hton->savepoint_rollback= hton->savepoint_release=
dummy_savepoint;
hton->create_group_by= create_group_by_handler;
+ hton->update_optimizer_costs= sequence_update_optimizer_costs;
return 0;
}
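
The sequence.cc hunks above show the shape of the new cost interface: per-handler methods now return an IO_AND_CPU_COST pair instead of a single double, and an engine can register a handlerton::update_optimizer_costs hook that tunes OPTIMIZER_COSTS fields for its access pattern. The following is a minimal, self-contained toy sketch of that shape; IO_AND_CPU_COST and OPTIMIZER_COSTS are re-declared locally with only the fields this diff touches, DISK_READ_COST is an assumed placeholder value, and toy_scan_time/toy_update_optimizer_costs are invented names that only mirror the structure of ha_seq::scan_time() and sequence_update_optimizer_costs() above, not the server's real definitions.

    // Toy model of the cost structures used by the hunks above; the real
    // definitions live in the server headers, not here.
    #include <cstdio>

    struct IO_AND_CPU_COST { double io; double cpu; };

    struct OPTIMIZER_COSTS
    {
      double disk_read_cost, disk_read_ratio;
      double key_next_find_cost, key_lookup_cost, key_copy_cost;
      double row_next_find_cost, row_lookup_cost, row_copy_cost;
    };

    static const double DISK_READ_COST= 10.24e-6;   // assumed placeholder

    // Shape of scan_time() for an engine whose rows are generated in memory
    // (like the sequence engine): no IO cost, CPU cost scales with row count.
    static IO_AND_CPU_COST toy_scan_time(double records)
    {
      IO_AND_CPU_COST cost;
      cost.io=  0;
      cost.cpu= records * DISK_READ_COST;
      return cost;
    }

    // Shape of the handlerton::update_optimizer_costs hook set up in init().
    static void toy_update_optimizer_costs(OPTIMIZER_COSTS *costs)
    {
      costs->disk_read_cost=  0;       // engine never touches disk
      costs->disk_read_ratio= 0.0;
      costs->row_copy_cost= costs->key_copy_cost= 1e-6;
    }

    int main()
    {
      OPTIMIZER_COSTS costs= {};
      toy_update_optimizer_costs(&costs);
      IO_AND_CPU_COST c= toy_scan_time(1000);
      std::printf("scan: io=%g cpu=%g row_copy=%g\n",
                  c.io, c.cpu, costs.row_copy_cost);
      return 0;
    }

The same split shows up in the Sphinx and Spider hunks below: io carries the per-range or per-block transfer estimate, cpu carries the per-row work, and engines with no local disk access simply report io as zero.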
diff --git a/storage/sphinx/ha_sphinx.h b/storage/sphinx/ha_sphinx.h
index f03e9d8c797..f5651fc6eb5 100644
--- a/storage/sphinx/ha_sphinx.h
+++ b/storage/sphinx/ha_sphinx.h
@@ -72,14 +72,28 @@ public:
uint max_supported_key_length () const { return MAX_KEY_LENGTH; }
uint max_supported_key_part_length () const { return MAX_KEY_LENGTH; }
- #if MYSQL_VERSION_ID>50100
- virtual double scan_time () { return (double)( stats.records+stats.deleted )/20.0 + 10; } ///< called in test_quick_select to determine if indexes should be used
- #else
- virtual double scan_time () { return (double)( records+deleted )/20.0 + 10; } ///< called in test_quick_select to determine if indexes should be used
- #endif
-
- virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return ranges + (double)rows/20.0 + 1; } ///< index read time estimate
+ IO_AND_CPU_COST scan_time ()
+ {
+ IO_AND_CPU_COST cost;
+ cost.io= 0;
+ cost.cpu= (double) (stats.records+stats.deleted) * DISK_READ_COST;
+ return cost;
+ }
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+ {
+ IO_AND_CPU_COST cost;
+ cost.io= ranges;
+ cost.cpu= 0;
+ return cost;
+ }
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows)
+ {
+ IO_AND_CPU_COST cost;
+ cost.io= 0;
+ cost.cpu= 0;
+ return cost;
+ }
public:
int open ( const char * name, int mode, uint test_if_locked );
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index eb691e52b0a..5cf67a091db 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -238,7 +238,6 @@ int ha_spider::open(
DBUG_PRINT("info",("spider this=%p", this));
dup_key_idx = (uint) -1;
- conn_kinds = SPIDER_CONN_KIND_MYSQL;
table->file->get_no_parts("", &part_num);
if (part_num)
{
@@ -590,22 +589,7 @@ int ha_spider::check_access_kind_for_connection(
int error_num, roop_count;
DBUG_ENTER("ha_spider::check_access_kind_for_connection");
DBUG_PRINT("info",("spider this=%p", this));
- conn_kinds = 0;
- switch (wide_handler->sql_command)
- {
- case SQLCOM_UPDATE:
- case SQLCOM_UPDATE_MULTI:
- case SQLCOM_DELETE:
- case SQLCOM_DELETE_MULTI:
- default:
- conn_kinds |= SPIDER_CONN_KIND_MYSQL;
- for (roop_count = 0; roop_count < (int) share->link_count; roop_count++)
- {
- conn_kind[roop_count] = SPIDER_CONN_KIND_MYSQL;
- }
- break;
- }
- if ((error_num = spider_check_trx_and_get_conn(thd, this, TRUE)))
+ if ((error_num= spider_check_trx_and_get_conn(thd, this)))
{
DBUG_RETURN(error_num);
}
@@ -1035,8 +1019,6 @@ int ha_spider::reset()
for (roop_count = share->link_count - 1; roop_count >= 0; roop_count--)
{
result_list.update_sqls[roop_count].length(0);
-
- conn_kind[roop_count] = SPIDER_CONN_KIND_MYSQL;
}
result_list.bulk_update_mode = 0;
result_list.bulk_update_size = 0;
@@ -1062,7 +1044,6 @@ int ha_spider::reset()
result_list.use_union = FALSE;
result_list.use_both_key = FALSE;
pt_clone_last_searcher = NULL;
- conn_kinds = SPIDER_CONN_KIND_MYSQL;
use_index_merge = FALSE;
init_rnd_handler = FALSE;
if (multi_range_keys)
@@ -3170,6 +3151,7 @@ ha_rows ha_spider::multi_range_read_info_const(
uint n_ranges,
uint *bufsz,
uint *flags,
+ ha_rows limit,
Cost_estimate *cost
)
{
@@ -3209,6 +3191,7 @@ ha_rows ha_spider::multi_range_read_info_const(
n_ranges,
bufsz,
flags,
+ limit,
cost
);
*flags &= ~HA_MRR_USE_DEFAULT_IMPL;
@@ -6680,8 +6663,7 @@ int ha_spider::info(
pthread_mutex_lock(&share->sts_mutex);
if (difftime(tmp_time, share->sts_get_time) >= sts_interval)
{
- if ((error_num = spider_check_trx_and_get_conn(ha_thd(), this,
- FALSE)))
+ if ((error_num= spider_check_trx_and_get_conn(ha_thd(), this)))
{
pthread_mutex_unlock(&share->sts_mutex);
if (!share->sts_init)
@@ -7266,7 +7248,7 @@ int ha_spider::check_crd()
}
if (crd_mode == 3)
crd_mode = 1;
- if ((error_num = spider_check_trx_and_get_conn(ha_thd(), this, FALSE)))
+ if ((error_num= spider_check_trx_and_get_conn(ha_thd(), this)))
{
DBUG_RETURN(check_error_mode(error_num));
}
@@ -8480,7 +8462,7 @@ int ha_spider::truncate()
DBUG_RETURN(ER_SPIDER_READ_ONLY_NUM);
}
wide_handler->sql_command = SQLCOM_TRUNCATE;
- if ((error_num = spider_check_trx_and_get_conn(thd, this, FALSE)))
+ if ((error_num= spider_check_trx_and_get_conn(thd, this)))
{
DBUG_RETURN(error_num);
}
@@ -8504,38 +8486,47 @@ int ha_spider::truncate()
DBUG_RETURN(0);
}
-
-double ha_spider::scan_time()
+IO_AND_CPU_COST ha_spider::scan_time()
{
+ IO_AND_CPU_COST cost;
DBUG_ENTER("ha_spider::scan_time");
DBUG_PRINT("info",("spider this=%p", this));
- DBUG_PRINT("info",("spider scan_time = %.6f",
- share->scan_rate * share->stat.records * share->stat.mean_rec_length + 2));
- DBUG_RETURN(share->scan_rate * share->stat.records *
- share->stat.mean_rec_length + 2);
+ cost.io=0;
+ cost.cpu= (DISK_READ_COST * share->stat.records * share->stat.mean_rec_length);
+ DBUG_PRINT("info",("spider scan_time = %.6f", cost.cpu));
+ DBUG_RETURN(cost);
}
-double ha_spider::read_time(
- uint index,
- uint ranges,
- ha_rows rows
-) {
- DBUG_ENTER("ha_spider::read_time");
+IO_AND_CPU_COST ha_spider::rnd_pos_time(ha_rows rows)
+{
+ IO_AND_CPU_COST cost= { 0.0, 0.0}; // Row is in memory
+ return cost;
+}
+
+IO_AND_CPU_COST ha_spider::keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks)
+{
+ IO_AND_CPU_COST cost;
+ DBUG_ENTER("ha_spider::keyread_time");
DBUG_PRINT("info",("spider this=%p", this));
+
+ /*
+ Here we only calculate transfer costs. The normal handler cost functions
+ will add costs for accessing a row/key.
+ */
if (wide_handler->keyread)
{
- DBUG_PRINT("info",("spider read_time(keyread) = %.6f",
- share->read_rate * table->key_info[index].key_length *
- rows / 2 + 2));
- DBUG_RETURN(share->read_rate * table->key_info[index].key_length *
- rows / 2 + 2);
+ cost.io= 0;
+ cost.cpu= DISK_READ_COST * rows * table->key_info[index].key_length;
} else {
- DBUG_PRINT("info",("spider read_time = %.6f",
- share->read_rate * share->stat.mean_rec_length * rows + 2));
- DBUG_RETURN(share->read_rate * share->stat.mean_rec_length * rows + 2);
+ cost.io= 0;
+ cost.cpu= DISK_READ_COST * rows * share->stat.mean_rec_length;
}
+ DBUG_PRINT("info",("spider scan_time(keyread) = %.6f", cost.cpu));
+ DBUG_RETURN(cost);
}
+
const key_map *ha_spider::keys_to_use_for_scanning()
{
DBUG_ENTER("ha_spider::keys_to_use_for_scanning");
@@ -12066,8 +12057,7 @@ int ha_spider::append_lock_tables_list()
DBUG_PRINT("info",("spider lock_table_type=%u",
wide_handler->lock_table_type));
- if ((error_num = spider_check_trx_and_get_conn(wide_handler->trx->thd, this,
- FALSE)))
+ if ((error_num= spider_check_trx_and_get_conn(wide_handler->trx->thd, this)))
{
DBUG_RETURN(error_num);
}
diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h
index 4dffdf78553..ac865e78f2c 100644
--- a/storage/spider/ha_spider.h
+++ b/storage/spider/ha_spider.h
@@ -60,8 +60,6 @@ public:
const char *mem_calc_file_name;
ulong mem_calc_line_no;
ulonglong *connection_ids;
- uint conn_kinds;
- uint *conn_kind;
char *conn_keys_first_ptr;
char **conn_keys;
SPIDER_CONN **conns;
@@ -252,6 +250,7 @@ public:
uint n_ranges,
uint *bufsz,
uint *flags,
+ ha_rows limit,
Cost_estimate *cost
);
ha_rows multi_range_read_info(
@@ -445,12 +444,10 @@ public:
);
int delete_all_rows();
int truncate();
- double scan_time();
- double read_time(
- uint index,
- uint ranges,
- ha_rows rows
- );
+ IO_AND_CPU_COST scan_time();
+ IO_AND_CPU_COST rnd_pos_time(ha_rows rows);
+ IO_AND_CPU_COST keyread_time(uint index, ulong ranges, ha_rows rows,
+ ulonglong blocks);
const key_map *keys_to_use_for_scanning();
ha_rows estimate_rows_upper_bound();
void print_error(
diff --git a/storage/spider/mysql-test/spider/bg/r/spider_fixes.result b/storage/spider/mysql-test/spider/bg/r/spider_fixes.result
index a6a7588b014..2f54ef93a13 100644
--- a/storage/spider/mysql-test/spider/bg/r/spider_fixes.result
+++ b/storage/spider/mysql-test/spider/bg/r/spider_fixes.result
@@ -481,7 +481,6 @@ DELETE FROM t1;
Warnings:
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
-Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
TRUNCATE t1;
Warnings:
Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
diff --git a/storage/spider/mysql-test/spider/bugfix/include/sql_mode_init.inc b/storage/spider/mysql-test/spider/bugfix/include/sql_mode_init.inc
index 09ab2934aea..337979a4f3d 100644
--- a/storage/spider/mysql-test/spider/bugfix/include/sql_mode_init.inc
+++ b/storage/spider/mysql-test/spider/bugfix/include/sql_mode_init.inc
@@ -5,21 +5,7 @@
--enable_result_log
--enable_query_log
--enable_warnings
---let $SQL_MODES= real_as_float,pipes_as_concat,ansi_quotes,ignore_space,ignore_bad_table_options,only_full_group_by,no_unsigned_subtraction,no_dir_in_create,postgresql,oracle,mssql,db2,maxdb,no_key_options,no_table_options,no_field_options,mysql323,mysql40,ansi,no_auto_value_on_zero,no_backslash_escapes,strict_trans_tables,strict_all_tables,no_zero_in_date,no_zero_date,allow_invalid_dates,error_for_division_by_zero,traditional,no_auto_create_user,high_not_precedence,no_engine_substitution,pad_char_to_full_length
-if (`SELECT IF(STRCMP('$SERVER_NAME', 'MariaDB') = 0, 1, 0)`)
-{
- if (`SELECT IF($SERVER_MAJOR_VERSION = 10, 1, 0)`)
- {
- if (`SELECT IF($SERVER_MINOR_VERSION >= 3, 1, 0)`)
- {
- --let $SQL_MODES= $SQL_MODES,empty_string_is_null,simultaneous_assignment
- }
- if (`SELECT IF($SERVER_MINOR_VERSION >= 4, 1, 0)`)
- {
- --let $SQL_MODES= $SQL_MODES,time_round_fractional
- }
- }
-}
+--let $SQL_MODES= real_as_float,pipes_as_concat,ansi_quotes,ignore_space,ignore_bad_table_options,only_full_group_by,no_unsigned_subtraction,no_dir_in_create,postgresql,oracle,mssql,db2,maxdb,no_key_options,no_table_options,no_field_options,mysql323,mysql40,ansi,no_auto_value_on_zero,no_backslash_escapes,strict_trans_tables,strict_all_tables,no_zero_in_date,no_zero_date,allow_invalid_dates,error_for_division_by_zero,traditional,no_auto_create_user,high_not_precedence,no_engine_substitution,pad_char_to_full_length,empty_string_is_null,simultaneous_assignment,time_round_fractional
--connection master_1
set @old_sql_mode= @@sql_mode;
eval set session sql_mode= '$SQL_MODES';
diff --git a/storage/spider/mysql-test/spider/bugfix/r/quick_mode_1.result b/storage/spider/mysql-test/spider/bugfix/r/quick_mode_1.result
index 89a07bf64e6..62e1b2e64b2 100644
--- a/storage/spider/mysql-test/spider/bugfix/r/quick_mode_1.result
+++ b/storage/spider/mysql-test/spider/bugfix/r/quick_mode_1.result
@@ -57,6 +57,10 @@ TRUNCATE TABLE mysql.general_log;
connection child2_2;
TRUNCATE TABLE mysql.general_log;
connection master_1;
+explain SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey = b.pkey;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE a index PRIMARY PRIMARY 4 NULL 2 Using index
+1 SIMPLE b eq_ref PRIMARY PRIMARY 4 auto_test_local.a.pkey 1 Using index
SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey = b.pkey;
pkey
0
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_22246.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_22246.test
index 9e58bc1a836..be993647bb9 100644
--- a/storage/spider/mysql-test/spider/bugfix/t/mdev_22246.test
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_22246.test
@@ -64,6 +64,7 @@ TRUNCATE TABLE mysql.general_log;
--connection master_1
SELECT * FROM tbl_a;
+--sorted_result
SELECT * FROM tbl_a WHERE id <0 || id >0;
--connection child2_1
diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_27172.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_27172.test
index 60c0ad42921..02a4b803a89 100644
--- a/storage/spider/mysql-test/spider/bugfix/t/mdev_27172.test
+++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_27172.test
@@ -2,6 +2,10 @@
--echo # MDEV-27172 Prefix indices on Spider tables may lead to wrong query results
--echo #
+# Disable test for ps-protocol as the general log has different number of
+# commands for --ps
+--source include/no_protocol.inc
+
--disable_query_log
--disable_result_log
--source ../../t/test_init.inc
diff --git a/storage/spider/mysql-test/spider/bugfix/t/quick_mode_1.test b/storage/spider/mysql-test/spider/bugfix/t/quick_mode_1.test
index 01fa0cb5128..c878a738c53 100644
--- a/storage/spider/mysql-test/spider/bugfix/t/quick_mode_1.test
+++ b/storage/spider/mysql-test/spider/bugfix/t/quick_mode_1.test
@@ -74,6 +74,7 @@ TRUNCATE TABLE mysql.general_log;
TRUNCATE TABLE mysql.general_log;
--connection master_1
+explain SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey = b.pkey;
SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey = b.pkey;
--connection child2_1
diff --git a/storage/spider/mysql-test/spider/r/direct_left_join_nullable.result b/storage/spider/mysql-test/spider/r/direct_left_join_nullable.result
index 4adfb1bd76a..83ec42044a5 100644
--- a/storage/spider/mysql-test/spider/r/direct_left_join_nullable.result
+++ b/storage/spider/mysql-test/spider/r/direct_left_join_nullable.result
@@ -87,7 +87,7 @@ a b c a
connection child2_1;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
-select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t0 left join `auto_test_remote`.`ta_r_auto_inc` t1 on (t1.`a` = t0.`a`) left join `auto_test_remote`.`ta_r_3` t2 on (t2.`c` = t1.`c`) left join `auto_test_remote`.`ta_r` t3 on (t3.`b` = t2.`b`) where 1 order by t0.`a` desc
+select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t0 left join `auto_test_remote`.`ta_r_auto_inc` t1 on ((t1.`a` = t0.`a`) and (t0.`a` is not null)) left join `auto_test_remote`.`ta_r_3` t2 on (t2.`c` = t1.`c`) left join `auto_test_remote`.`ta_r` t3 on ((t3.`b` = t2.`b`) and (t2.`b` is not null)) where 1 order by t0.`a` desc
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %'
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_r ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
diff --git a/storage/spider/mysql-test/spider/r/direct_left_right_join_nullable.result b/storage/spider/mysql-test/spider/r/direct_left_right_join_nullable.result
index a6bd3a7c1a1..ff4f211faf5 100644
--- a/storage/spider/mysql-test/spider/r/direct_left_right_join_nullable.result
+++ b/storage/spider/mysql-test/spider/r/direct_left_right_join_nullable.result
@@ -87,7 +87,7 @@ NULL NULL NULL 3
connection child2_1;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
-select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t3 left join (`auto_test_remote`.`ta_r_auto_inc` t2 join `auto_test_remote`.`ta_r_3` t1 join `auto_test_remote`.`ta_r` t0) on ((t2.`b` = t3.`b`) and (t2.`c` = t1.`c`) and (t0.`a` = t1.`a`) and (t1.`a` is not null)) where 1 order by t3.`a` desc
+select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t3 left join (`auto_test_remote`.`ta_r_auto_inc` t2 join `auto_test_remote`.`ta_r_3` t1 join `auto_test_remote`.`ta_r` t0) on ((t2.`b` = t3.`b`) and (t1.`c` = t2.`c`) and (t0.`a` = t1.`a`) and (t3.`b` is not null) and (t1.`a` is not null)) where 1 order by t3.`a` desc
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %'
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_r ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
diff --git a/storage/spider/mysql-test/spider/r/direct_right_join_nullable.result b/storage/spider/mysql-test/spider/r/direct_right_join_nullable.result
index 5101ea5036a..02f985279f8 100644
--- a/storage/spider/mysql-test/spider/r/direct_right_join_nullable.result
+++ b/storage/spider/mysql-test/spider/r/direct_right_join_nullable.result
@@ -87,7 +87,7 @@ NULL c 2000-01-03 00:00:00 3
connection child2_1;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
-select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t3 left join `auto_test_remote`.`ta_r_auto_inc` t2 on (t2.`b` = t3.`b`) left join `auto_test_remote`.`ta_r_3` t1 on (t1.`c` = t2.`c`) left join `auto_test_remote`.`ta_r` t0 on ((t0.`a` = t1.`a`) and (t1.`a` is not null)) where 1 order by t3.`a` desc
+select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t3 left join `auto_test_remote`.`ta_r_auto_inc` t2 on ((t2.`b` = t3.`b`) and (t3.`b` is not null)) left join `auto_test_remote`.`ta_r_3` t1 on (t1.`c` = t2.`c`) left join `auto_test_remote`.`ta_r` t0 on ((t0.`a` = t1.`a`) and (t1.`a` is not null)) where 1 order by t3.`a` desc
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %'
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_r ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
diff --git a/storage/spider/mysql-test/spider/r/direct_right_left_right_join_nullable.result b/storage/spider/mysql-test/spider/r/direct_right_left_right_join_nullable.result
index f6c808be973..840328508fa 100644
--- a/storage/spider/mysql-test/spider/r/direct_right_left_right_join_nullable.result
+++ b/storage/spider/mysql-test/spider/r/direct_right_left_right_join_nullable.result
@@ -87,7 +87,7 @@ NULL c 2000-01-03 00:00:00 3
connection child2_1;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
-select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t3 left join (`auto_test_remote`.`ta_r_auto_inc` t2 join `auto_test_remote`.`ta_r_3` t1 left join `auto_test_remote`.`ta_r` t0 on ((t0.`a` = t1.`a`) and (t1.`a` is not null))) on ((t2.`b` = t3.`b`) and (t2.`c` = t1.`c`)) where 1 order by t3.`a` desc
+select t0.`a` `a`,t2.`b` `b`,t2.`c` `c`,t3.`a` `a` from `auto_test_remote`.`ta_r_no_idx` t3 left join (`auto_test_remote`.`ta_r_auto_inc` t2 join `auto_test_remote`.`ta_r_3` t1 left join `auto_test_remote`.`ta_r` t0 on ((t0.`a` = t1.`a`) and (t1.`a` is not null))) on ((t2.`b` = t3.`b`) and (t1.`c` = t2.`c`) and (t3.`b` is not null)) where 1 order by t3.`a` desc
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %'
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_r ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
diff --git a/storage/spider/mysql-test/spider/r/partition_mrr.result b/storage/spider/mysql-test/spider/r/partition_mrr.result
index c1b7d6e6a4a..61878a15698 100644
--- a/storage/spider/mysql-test/spider/r/partition_mrr.result
+++ b/storage/spider/mysql-test/spider/r/partition_mrr.result
@@ -74,36 +74,36 @@ TRUNCATE TABLE mysql.general_log;
connection master_1;
SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey = b.pkey;
pkey
-4
-5
+0
+1
10
11
+12
+13
+14
+15
16
17
+18
+19
+2
+20
+21
22
23
+24
+25
+26
+27
28
29
-0
-1
+3
+4
+5
6
7
-12
-13
-18
-19
-24
-25
-2
-3
8
9
-14
-15
-20
-21
-26
-27
SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey+0 = b.pkey+0 ORDER BY a.pkey;
pkey
0
@@ -140,7 +140,9 @@ connection child2_1;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
select `pkey` from `auto_test_remote`.`tbl_a` order by `pkey`
-select a.id,b.`pkey` from auto_test_remote.tmp_spider_bka_xxxx a,`auto_test_remote`.`tbl_b` b where a.c0 <=> b.`pkey`
+select `pkey` from `auto_test_remote`.`tbl_b` order by `pkey`
+select `pkey` from `auto_test_remote`.`tbl_b` order by `pkey`
+select `pkey` from `auto_test_remote`.`tbl_b` order by `pkey`
select `pkey` from `auto_test_remote`.`tbl_a` order by `pkey`
select `pkey` from `auto_test_remote`.`tbl_b` order by `pkey`
select `pkey` from `auto_test_remote`.`tbl_b` order by `pkey`
@@ -174,7 +176,9 @@ connection child2_2;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
select `pkey` from `auto_test_remote2`.`tbl_a` order by `pkey`
-select a.id,b.`pkey` from auto_test_remote2.tmp_spider_bka_xxxx a,`auto_test_remote2`.`tbl_b` b where a.c0 <=> b.`pkey`
+select `pkey` from `auto_test_remote2`.`tbl_b` order by `pkey`
+select `pkey` from `auto_test_remote2`.`tbl_b` order by `pkey`
+select `pkey` from `auto_test_remote2`.`tbl_b` order by `pkey`
select `pkey` from `auto_test_remote2`.`tbl_a` order by `pkey`
select `pkey` from `auto_test_remote2`.`tbl_b` order by `pkey`
select `pkey` from `auto_test_remote2`.`tbl_b` order by `pkey`
@@ -208,7 +212,9 @@ connection child2_3;
SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%select %';
argument
select `pkey` from `auto_test_remote3`.`tbl_a` order by `pkey`
-select a.id,b.`pkey` from auto_test_remote3.tmp_spider_bka_xxxx a,`auto_test_remote3`.`tbl_b` b where a.c0 <=> b.`pkey`
+select `pkey` from `auto_test_remote3`.`tbl_b` order by `pkey`
+select `pkey` from `auto_test_remote3`.`tbl_b` order by `pkey`
+select `pkey` from `auto_test_remote3`.`tbl_b` order by `pkey`
select `pkey` from `auto_test_remote3`.`tbl_a` order by `pkey`
select `pkey` from `auto_test_remote3`.`tbl_b` order by `pkey`
select `pkey` from `auto_test_remote3`.`tbl_b` order by `pkey`
diff --git a/storage/spider/mysql-test/spider/r/spider_fixes.result b/storage/spider/mysql-test/spider/r/spider_fixes.result
index 3b9d939393a..5e17e83618e 100644
--- a/storage/spider/mysql-test/spider/r/spider_fixes.result
+++ b/storage/spider/mysql-test/spider/r/spider_fixes.result
@@ -481,7 +481,6 @@ DELETE FROM t1;
Warnings:
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
-Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
TRUNCATE t1;
Warnings:
Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
diff --git a/storage/spider/mysql-test/spider/t/partition_mrr.test b/storage/spider/mysql-test/spider/t/partition_mrr.test
index 23f4fdb6e27..6c431401e18 100644
--- a/storage/spider/mysql-test/spider/t/partition_mrr.test
+++ b/storage/spider/mysql-test/spider/t/partition_mrr.test
@@ -168,6 +168,7 @@ if ($USE_CHILD_GROUP2)
}
}
--connection master_1
+--sorted_result
SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey = b.pkey;
SELECT a.pkey FROM tbl_a a, tbl_b b WHERE a.pkey+0 = b.pkey+0 ORDER BY a.pkey; # MDEV-29947
if ($USE_CHILD_GROUP2)
diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc
index 0f252012bd9..ca556702c65 100644
--- a/storage/spider/spd_conn.cc
+++ b/storage/spider/spd_conn.cc
@@ -108,7 +108,6 @@ uchar *spider_conn_get_key(
) {
DBUG_ENTER("spider_conn_get_key");
*length = conn->conn_key_length;
- DBUG_PRINT("info",("spider conn_kind=%u", conn->conn_kind));
#ifdef DBUG_TRACE
spider_print_keys(conn->conn_key, conn->conn_key_length);
#endif
@@ -382,7 +381,6 @@ SPIDER_CONN *spider_create_conn(
ha_spider *spider,
int link_idx,
int base_link_idx,
- uint conn_kind,
int *error_num
) {
int *need_mon;
@@ -602,7 +600,6 @@ SPIDER_CONN *spider_create_conn(
conn->semi_trx_isolation_chk = FALSE;
conn->semi_trx_chk = FALSE;
conn->link_idx = base_link_idx;
- conn->conn_kind = conn_kind;
conn->conn_need_mon = need_mon;
if (spider)
conn->need_mon = &spider->need_mons[base_link_idx];
@@ -689,13 +686,11 @@ SPIDER_CONN *spider_get_conn(
ha_spider *spider,
bool another,
bool thd_chg,
- uint conn_kind,
int *error_num
) {
SPIDER_CONN *conn = NULL;
int base_link_idx = link_idx;
DBUG_ENTER("spider_get_conn");
- DBUG_PRINT("info",("spider conn_kind=%u", conn_kind));
if (spider)
link_idx = spider->conn_link_idx[base_link_idx];
@@ -734,7 +729,8 @@ SPIDER_CONN *spider_get_conn(
pthread_mutex_unlock(&spider_conn_mutex);
if (spider_param_max_connections())
{ /* enable connection pool */
- conn = spider_get_conn_from_idle_connection(share, link_idx, conn_key, spider, conn_kind, base_link_idx, error_num);
+ conn= spider_get_conn_from_idle_connection(
+ share, link_idx, conn_key, spider, base_link_idx, error_num);
/* failed get conn, goto error */
if (!conn)
goto error;
@@ -743,8 +739,8 @@ SPIDER_CONN *spider_get_conn(
else
{ /* did not enable conncetion pool , create_conn */
DBUG_PRINT("info",("spider create new conn"));
- if (!(conn = spider_create_conn(share, spider, link_idx,
- base_link_idx, conn_kind, error_num)))
+ if (!(conn= spider_create_conn(share, spider, link_idx,
+ base_link_idx, error_num)))
goto error;
*conn->conn_key = *conn_key;
if (spider)
@@ -768,8 +764,8 @@ SPIDER_CONN *spider_get_conn(
} else {
DBUG_PRINT("info",("spider create new conn"));
/* conn_recycle_strict = 0 and conn_recycle_mode = 0 or 2 */
- if (!(conn = spider_create_conn(share, spider, link_idx, base_link_idx,
- conn_kind, error_num)))
+ if (!(conn= spider_create_conn(share, spider, link_idx, base_link_idx,
+ error_num)))
goto error;
*conn->conn_key = *conn_key;
if (spider)
@@ -892,13 +888,10 @@ int spider_check_and_get_casual_read_conn(
char first_byte_bak = *spider->conn_keys[link_idx];
*spider->conn_keys[link_idx] =
'0' + spider->result_list.casual_read[link_idx];
- if (
- !(spider->conns[link_idx] =
- spider_get_conn(spider->share, link_idx,
- spider->conn_keys[link_idx], spider->wide_handler->trx,
- spider, FALSE, TRUE, SPIDER_CONN_KIND_MYSQL,
- &error_num))
- ) {
+ if (!(spider->conns[link_idx]= spider_get_conn(
+ spider->share, link_idx, spider->conn_keys[link_idx],
+ spider->wide_handler->trx, spider, FALSE, TRUE, &error_num)))
+ {
*spider->conn_keys[link_idx] = first_byte_bak;
DBUG_RETURN(error_num);
}
@@ -3017,9 +3010,8 @@ void *spider_bg_sts_action(
if (!conns[spider.search_link_idx])
{
spider_get_conn(share, spider.search_link_idx,
- share->conn_keys[spider.search_link_idx],
- trx, &spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL,
- &error_num);
+ share->conn_keys[spider.search_link_idx], trx,
+ &spider, FALSE, FALSE, &error_num);
conns[spider.search_link_idx]->error_mode = 0;
/*
if (
@@ -3342,9 +3334,8 @@ void *spider_bg_crd_action(
if (!conns[spider.search_link_idx])
{
spider_get_conn(share, spider.search_link_idx,
- share->conn_keys[spider.search_link_idx],
- trx, &spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL,
- &error_num);
+ share->conn_keys[spider.search_link_idx], trx,
+ &spider, FALSE, FALSE, &error_num);
conns[spider.search_link_idx]->error_mode = 0;
/*
if (
@@ -3911,7 +3902,6 @@ SPIDER_CONN* spider_get_conn_from_idle_connection(
int link_idx,
char *conn_key,
ha_spider *spider,
- uint conn_kind,
int base_link_idx,
int *error_num
)
@@ -3999,7 +3989,8 @@ SPIDER_CONN* spider_get_conn_from_idle_connection(
if (ip_port_conn)
pthread_mutex_unlock(&ip_port_conn->mutex);
DBUG_PRINT("info",("spider create new conn"));
- if (!(conn = spider_create_conn(share, spider, link_idx, base_link_idx, conn_kind, error_num)))
+ if (!(conn= spider_create_conn(share, spider, link_idx, base_link_idx,
+ error_num)))
DBUG_RETURN(conn);
*conn->conn_key = *conn_key;
if (spider)
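
The spd_conn.cc hunks above keep spider_get_conn()'s pool-or-create flow while dropping the conn_kind argument, so a connection is now identified purely by its conn_key. Below is a toy, self-contained sketch of that control flow only; ToyConn, ToyPool and the key string are invented for illustration and do not correspond to Spider's real data structures.

    // Toy pool-or-create lookup keyed only by the connection key, mirroring
    // the flow of spider_get_conn() after conn_kind was removed.
    #include <cstdio>
    #include <map>
    #include <memory>
    #include <string>

    struct ToyConn { std::string key; };

    class ToyPool
    {
      std::map<std::string, std::unique_ptr<ToyConn>> idle_;
    public:
      // Return an idle connection for `key`, or create a new one.
      ToyConn *get(const std::string &key)
      {
        auto it= idle_.find(key);
        if (it != idle_.end())
        {
          ToyConn *conn= it->second.release();
          idle_.erase(it);
          return conn;                   // reuse pooled connection
        }
        return new ToyConn{key};         // "spider create new conn"
      }
      void put(ToyConn *conn) { idle_[conn->key].reset(conn); }
    };

    int main()
    {
      ToyPool pool;
      ToyConn *c= pool.get("srv1#user#3306");
      pool.put(c);
      std::printf("reused: %d\n", pool.get("srv1#user#3306") == c);
      delete c;
      return 0;
    }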
diff --git a/storage/spider/spd_conn.h b/storage/spider/spd_conn.h
index 807e1474ed2..1759f06baa6 100644
--- a/storage/spider/spd_conn.h
+++ b/storage/spider/spd_conn.h
@@ -84,7 +84,6 @@ SPIDER_CONN *spider_create_conn(
ha_spider *spider,
int link_id,
int base_link_id,
- uint conn_kind,
int *error_num
);
@@ -96,7 +95,6 @@ SPIDER_CONN *spider_get_conn(
ha_spider *spider,
bool another,
bool thd_chg,
- uint conn_kind,
int *error_num
);
@@ -397,7 +395,6 @@ SPIDER_CONN* spider_get_conn_from_idle_connection
int link_idx,
char *conn_key,
ha_spider *spider,
- uint conn_kind,
int base_link_idx,
int *error_num
);
diff --git a/storage/spider/spd_copy_tables.cc b/storage/spider/spd_copy_tables.cc
index eb2454b2f29..5e28b590309 100644
--- a/storage/spider/spd_copy_tables.cc
+++ b/storage/spider/spd_copy_tables.cc
@@ -593,11 +593,10 @@ int spider_udf_get_copy_tgt_conns(
while (table_conn)
{
share = table_conn->share;
- if (
- !(table_conn->conn = spider_get_conn(
- share, 0, share->conn_keys[0], trx, NULL, FALSE, FALSE,
- SPIDER_CONN_KIND_MYSQL, &error_num))
- ) {
+ if (!(table_conn->conn=
+ spider_get_conn(share, 0, share->conn_keys[0], trx, NULL,
+ FALSE, FALSE, &error_num)))
+ {
my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), share->server_names[0]);
DBUG_RETURN(ER_CONNECT_TO_FOREIGN_DATA_SOURCE);
}
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index b64aaab4d58..9c91d666c0a 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -80,7 +80,7 @@ int spider_db_connect(
THD* thd = current_thd;
longlong connect_retry_interval;
DBUG_ENTER("spider_db_connect");
- DBUG_ASSERT(conn->conn_kind != SPIDER_CONN_KIND_MYSQL || conn->need_mon);
+ DBUG_ASSERT(conn->need_mon);
DBUG_PRINT("info",("spider link_idx=%d", link_idx));
DBUG_PRINT("info",("spider conn=%p", conn));
@@ -240,7 +240,6 @@ void spider_db_disconnect(
) {
DBUG_ENTER("spider_db_disconnect");
DBUG_PRINT("info",("spider conn=%p", conn));
- DBUG_PRINT("info",("spider conn->conn_kind=%u", conn->conn_kind));
if (conn->db_conn->is_connected())
{
conn->db_conn->disconnect();
diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h
index bbe27271e37..8b2ebb821df 100644
--- a/storage/spider/spd_db_include.h
+++ b/storage/spider/spd_db_include.h
@@ -168,8 +168,6 @@ typedef st_spider_result SPIDER_RESULT;
#define SPIDER_SQL_LOP_CHK_PRM_PRF_STR "spider_lc_"
#define SPIDER_SQL_LOP_CHK_PRM_PRF_LEN (sizeof(SPIDER_SQL_LOP_CHK_PRM_PRF_STR) - 1)
-#define SPIDER_CONN_KIND_MYSQL (1 << 0)
-
#define SPIDER_SQL_TYPE_SELECT_SQL (1 << 0)
#define SPIDER_SQL_TYPE_INSERT_SQL (1 << 1)
#define SPIDER_SQL_TYPE_UPDATE_SQL (1 << 2)
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 78236243bf2..21c2e6bb434 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -5839,88 +5839,7 @@ int spider_db_mbase_util::open_item_func(
alias, alias_length, dbton_id, use_fields, fields));
} else if (!strncasecmp("timestampdiff", func_name, func_name_length))
{
-#ifdef ITEM_FUNC_TIMESTAMPDIFF_ARE_PUBLIC
- Item_func_timestamp_diff *item_func_timestamp_diff =
- (Item_func_timestamp_diff *) item_func;
- if (str)
- {
- const char *interval_str;
- uint interval_len;
- switch (item_func_timestamp_diff->int_type)
- {
- case INTERVAL_YEAR:
- interval_str = SPIDER_SQL_YEAR_STR;
- interval_len = SPIDER_SQL_YEAR_LEN;
- break;
- case INTERVAL_QUARTER:
- interval_str = SPIDER_SQL_QUARTER_STR;
- interval_len = SPIDER_SQL_QUARTER_LEN;
- break;
- case INTERVAL_MONTH:
- interval_str = SPIDER_SQL_MONTH_STR;
- interval_len = SPIDER_SQL_MONTH_LEN;
- break;
- case INTERVAL_WEEK:
- interval_str = SPIDER_SQL_WEEK_STR;
- interval_len = SPIDER_SQL_WEEK_LEN;
- break;
- case INTERVAL_DAY:
- interval_str = SPIDER_SQL_DAY_STR;
- interval_len = SPIDER_SQL_DAY_LEN;
- break;
- case INTERVAL_HOUR:
- interval_str = SPIDER_SQL_HOUR_STR;
- interval_len = SPIDER_SQL_HOUR_LEN;
- break;
- case INTERVAL_MINUTE:
- interval_str = SPIDER_SQL_MINUTE_STR;
- interval_len = SPIDER_SQL_MINUTE_LEN;
- break;
- case INTERVAL_SECOND:
- interval_str = SPIDER_SQL_SECOND_STR;
- interval_len = SPIDER_SQL_SECOND_LEN;
- break;
- case INTERVAL_MICROSECOND:
- interval_str = SPIDER_SQL_MICROSECOND_STR;
- interval_len = SPIDER_SQL_MICROSECOND_LEN;
- break;
- default:
- interval_str = "";
- interval_len = 0;
- break;
- }
- str->length(str->length() - SPIDER_SQL_OPEN_PAREN_LEN);
- if (str->reserve(func_name_length + SPIDER_SQL_OPEN_PAREN_LEN +
- interval_len + SPIDER_SQL_COMMA_LEN))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- str->q_append(func_name, func_name_length);
- str->q_append(SPIDER_SQL_OPEN_PAREN_STR, SPIDER_SQL_OPEN_PAREN_LEN);
- str->q_append(interval_str, interval_len);
- str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN);
- }
- if ((error_num = spider_db_print_item_type(item_list[0], NULL, spider,
- str, alias, alias_length, dbton_id, use_fields, fields)))
- DBUG_RETURN(error_num);
- if (str)
- {
- if (str->reserve(SPIDER_SQL_COMMA_LEN))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN);
- }
- if ((error_num = spider_db_print_item_type(item_list[1], NULL, spider,
- str, alias, alias_length, dbton_id, use_fields, fields)))
- DBUG_RETURN(error_num);
- if (str)
- {
- if (str->reserve(SPIDER_SQL_CLOSE_PAREN_LEN))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- str->q_append(SPIDER_SQL_CLOSE_PAREN_STR,
- SPIDER_SQL_CLOSE_PAREN_LEN);
- }
- DBUG_RETURN(0);
-#else
DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
-#endif
}
} else if (func_name_length == 14)
{
@@ -8298,10 +8217,10 @@ int spider_mbase_share::discover_table_structure(
SPIDER_CONN *conn;
int need_mon;
- if (!(conn = spider_get_conn(
- spider_share, 0, spider_share->conn_keys[roop_count], trx, NULL, FALSE,
- FALSE, SPIDER_CONN_KIND_MYSQL, &error_num))
- ) {
+ if (!(conn= spider_get_conn(spider_share, 0,
+ spider_share->conn_keys[roop_count], trx, NULL,
+ FALSE, FALSE, &error_num)))
+ {
DBUG_RETURN(error_num);
}
pthread_mutex_assert_not_owner(&conn->mta_conn_mutex);
diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc
index 429c8fa9ae7..40486073730 100644
--- a/storage/spider/spd_direct_sql.cc
+++ b/storage/spider/spd_direct_sql.cc
@@ -551,7 +551,6 @@ SPIDER_CONN *spider_udf_direct_sql_create_conn(
conn->semi_trx_isolation = -2;
conn->semi_trx_isolation_chk = FALSE;
conn->semi_trx_chk = FALSE;
- conn->conn_kind = SPIDER_CONN_KIND_MYSQL;
if (mysql_mutex_init(spd_key_mutex_mta_conn, &conn->mta_conn_mutex,
MY_MUTEX_INIT_FAST))
@@ -697,7 +696,6 @@ SPIDER_CONN *spider_udf_direct_sql_get_conn(
conn->queued_ping = FALSE;
DBUG_PRINT("info",("spider conn=%p", conn));
- DBUG_PRINT("info",("spider conn->conn_kind=%u", conn->conn_kind));
DBUG_RETURN(conn);
error:
diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h
index 26acaaa86ec..e6d4c2dca87 100644
--- a/storage/spider/spd_include.h
+++ b/storage/spider/spd_include.h
@@ -85,7 +85,6 @@
#define SPIDER_TEST(A) MY_TEST(A)
-#define SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
#define SPIDER_ENGINE_CONDITION_PUSHDOWN_IS_ALWAYS_ON
#define SPIDER_Item_args_arg_count_IS_PROTECTED
@@ -328,7 +327,6 @@ typedef struct st_spider_conn_loop_check SPIDER_CONN_LOOP_CHECK;
/* database connection */
typedef struct st_spider_conn
{
- uint conn_kind;
char *conn_key;
uint conn_key_length;
my_hash_value_type conn_key_hash_value;
diff --git a/storage/spider/spd_init_query.h b/storage/spider/spd_init_query.h
index e66e94d8373..c3bea1c166b 100644
--- a/storage/spider/spd_init_query.h
+++ b/storage/spider/spd_init_query.h
@@ -538,16 +538,15 @@ static LEX_STRING spider_init_queries[] = {
" engine=Aria transactional=1;"
" end if;"
" end if;"
+/*
+ tables for ddl pushdown
+*/
+/*
" if @server_name = 'MariaDB' and"
" ("
- " @server_major_version > 10 or"
- " ("
- " @server_major_version = 10 and"
- " @server_minor_version >= 999"
- " )"
+ " @server_major_version > 11"
" )"
" then"
- " /* table for ddl pushdown */"
" create table if not exists mysql.spider_rewrite_tables("
" table_id bigint unsigned not null auto_increment,"
" db_name char(64) not null default '',"
@@ -602,6 +601,7 @@ static LEX_STRING spider_init_queries[] = {
" primary key (db_name, table_name, table_id, partition_id)"
" ) engine=Aria transactional=1 default charset=utf8 collate=utf8_bin;"
" end if;"
+*/
/*
Fix for version 3.4
*/
@@ -798,18 +798,15 @@ static LEX_STRING spider_init_queries[] = {
" soname 'ha_spider.dll';"
" end if;"
" end if;"
- " if @server_name = 'MariaDB' and"
- " ("
- " @server_major_version > 10 or"
- " ("
- " @server_major_version = 10 and"
- " @server_minor_version >= 999"
- " )"
- " )"
- " then"
/*
Install spider_rewrite plugin
*/
+/*
+ " if @server_name = 'MariaDB' and "
+ " ("
+ " @server_major_version > 11"
+ " )"
+ " then"
" set @have_spider_i_s_rewrite_plugin := 0;"
" select @have_spider_i_s_rewrite_plugin := 1"
" from INFORMATION_SCHEMA.plugins"
@@ -819,11 +816,6 @@ static LEX_STRING spider_init_queries[] = {
" where name = 'spider_rewrite';"
" if @have_spider_i_s_rewrite_plugin = 0 then"
" if @have_spider_rewrite_plugin = 1 then"
- " /*"
- " spider_rewrite plugin is present in mysql.plugin but not in"
- " information_schema.plugins. Remove spider_rewrite plugin entry"
- " in mysql.plugin first."
- " */"
" delete from mysql.plugin where name = 'spider_rewrite';"
" end if;"
" if @win_plugin = 0 then "
@@ -845,6 +837,7 @@ static LEX_STRING spider_init_queries[] = {
" end if;"
" end if;"
" end if;"
+*/
"end;"
)},
{C_STRING_WITH_LEN(
diff --git a/storage/spider/spd_ping_table.cc b/storage/spider/spd_ping_table.cc
index b331a9fec0d..e82a5925265 100644
--- a/storage/spider/spd_ping_table.cc
+++ b/storage/spider/spd_ping_table.cc
@@ -594,11 +594,9 @@ SPIDER_CONN *spider_get_ping_table_tgt_conn(
) {
SPIDER_CONN *conn;
DBUG_ENTER("spider_get_ping_table_tgt_conn");
- if (
- !(conn = spider_get_conn(
- share, 0, share->conn_keys[0], trx, NULL, FALSE, FALSE,
- SPIDER_CONN_KIND_MYSQL, error_num))
- ) {
+ if (!(conn= spider_get_conn(share, 0, share->conn_keys[0], trx, NULL, FALSE,
+ FALSE, error_num)))
+ {
my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0),
share->server_names[0]);
*error_num = ER_CONNECT_TO_FOREIGN_DATA_SOURCE;
diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc
index a0cf104d46e..df95336cc19 100644
--- a/storage/spider/spd_sys_table.cc
+++ b/storage/spider/spd_sys_table.cc
@@ -3572,24 +3572,13 @@ TABLE *spider_mk_sys_tmp_table(
TABLE *tmp_table;
DBUG_ENTER("spider_mk_sys_tmp_table");
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(field = new (thd->mem_root) Field_blob(
4294967295U, FALSE, field_name, cs, TRUE)))
goto error_alloc_field;
-#else
- if (!(field = new Field_blob(
- 4294967295U, FALSE, field_name, cs, TRUE)))
- goto error_alloc_field;
-#endif
field->init(table);
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(i_field = new (thd->mem_root) Item_field(thd, (Field *) field)))
goto error_alloc_item_field;
-#else
- if (!(i_field = new Item_field((Field *) field)))
- goto error_alloc_item_field;
-#endif
if (i_list.push_back(i_field))
goto error_push_item;
@@ -3650,68 +3639,35 @@ TABLE *spider_mk_sys_tmp_table_for_result(
TABLE *tmp_table;
DBUG_ENTER("spider_mk_sys_tmp_table_for_result");
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(field1 = new (thd->mem_root) Field_blob(
4294967295U, FALSE, field_name1, cs, TRUE)))
goto error_alloc_field1;
-#else
- if (!(field1 = new Field_blob(
- 4294967295U, FALSE, field_name1, cs, TRUE)))
- goto error_alloc_field1;
-#endif
field1->init(table);
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(i_field1 = new (thd->mem_root) Item_field(thd, (Field *) field1)))
goto error_alloc_item_field1;
-#else
- if (!(i_field1 = new Item_field((Field *) field1)))
- goto error_alloc_item_field1;
-#endif
if (i_list.push_back(i_field1))
goto error_push_item1;
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(field2 = new (thd->mem_root) Field_blob(
4294967295U, FALSE, field_name2, cs, TRUE)))
goto error_alloc_field2;
-#else
- if (!(field2 = new Field_blob(
- 4294967295U, FALSE, field_name2, cs, TRUE)))
- goto error_alloc_field2;
-#endif
field2->init(table);
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(i_field2 = new (thd->mem_root) Item_field(thd, (Field *) field2)))
goto error_alloc_item_field2;
-#else
- if (!(i_field2 = new Item_field((Field *) field2)))
- goto error_alloc_item_field2;
-#endif
if (i_list.push_back(i_field2))
goto error_push_item2;
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(field3 = new (thd->mem_root) Field_blob(
4294967295U, FALSE, field_name3, cs, TRUE)))
goto error_alloc_field3;
-#else
- if (!(field3 = new Field_blob(
- 4294967295U, FALSE, field_name3, cs, TRUE)))
- goto error_alloc_field3;
-#endif
field3->init(table);
-#ifdef SPIDER_FIELD_FIELDPTR_REQUIRES_THDPTR
if (!(i_field3 = new (thd->mem_root) Item_field(thd, (Field *) field3)))
goto error_alloc_item_field3;
-#else
- if (!(i_field3 = new Item_field((Field *) field3)))
- goto error_alloc_item_field3;
-#endif
if (i_list.push_back(i_field3))
goto error_push_item3;
diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index 46dba6abdc9..13afb5077d3 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -4842,7 +4842,6 @@ SPIDER_SHARE *spider_get_share(
&spider->conn_link_idx, sizeof(uint) * share->link_count,
&spider->conn_can_fo, sizeof(uchar) * share->link_bitmap_size,
&spider->connection_ids, sizeof(ulonglong) * share->link_count,
- &spider->conn_kind, sizeof(uint) * share->link_count,
&spider->db_request_id, sizeof(ulonglong) * share->link_count,
&spider->db_request_phase, sizeof(uchar) * share->link_bitmap_size,
&spider->need_mons, sizeof(int) * share->link_count,
@@ -4875,7 +4874,6 @@ SPIDER_SHARE *spider_get_share(
tmp_name += share->conn_keys_lengths[roop_count] + 1;
result_list->upd_tmp_tbl_prms[roop_count].init();
result_list->upd_tmp_tbl_prms[roop_count].field_count = 1;
- spider->conn_kind[roop_count] = SPIDER_CONN_KIND_MYSQL;
}
spider_trx_set_link_idx_for_all(spider);
@@ -4930,7 +4928,6 @@ SPIDER_SHARE *spider_get_share(
!(spider->conns[roop_count] =
spider_get_conn(share, roop_count, spider->conn_keys[roop_count],
spider->wide_handler->trx, spider, FALSE, TRUE,
- SPIDER_CONN_KIND_MYSQL,
error_num))
) {
if (
@@ -5297,7 +5294,6 @@ SPIDER_SHARE *spider_get_share(
&spider->conn_link_idx, sizeof(uint) * share->link_count,
&spider->conn_can_fo, sizeof(uchar) * share->link_bitmap_size,
&spider->connection_ids, sizeof(ulonglong) * share->link_count,
- &spider->conn_kind, sizeof(uint) * share->link_count,
&spider->db_request_id, sizeof(ulonglong) * share->link_count,
&spider->db_request_phase, sizeof(uchar) * share->link_bitmap_size,
&spider->need_mons, sizeof(int) * share->link_count,
@@ -5327,7 +5323,6 @@ SPIDER_SHARE *spider_get_share(
tmp_name += share->conn_keys_lengths[roop_count] + 1;
result_list->upd_tmp_tbl_prms[roop_count].init();
result_list->upd_tmp_tbl_prms[roop_count].field_count = 1;
- spider->conn_kind[roop_count] = SPIDER_CONN_KIND_MYSQL;
}
spider_trx_set_link_idx_for_all(spider);
@@ -5379,7 +5374,6 @@ SPIDER_SHARE *spider_get_share(
!(spider->conns[roop_count] =
spider_get_conn(share, roop_count, spider->conn_keys[roop_count],
spider->wide_handler->trx, spider, FALSE, TRUE,
- SPIDER_CONN_KIND_MYSQL,
error_num))
) {
if (
@@ -6034,11 +6028,9 @@ int spider_open_all_tables(
}
/* create conn */
- if (
- !(conn = spider_get_conn(
- &tmp_share, 0, tmp_share.conn_keys[0], trx, NULL, FALSE, FALSE,
- SPIDER_CONN_KIND_MYSQL, &error_num))
- ) {
+ if (!(conn= spider_get_conn(&tmp_share, 0, tmp_share.conn_keys[0], trx,
+ NULL, FALSE, FALSE, &error_num)))
+ {
spider_sys_index_end(table_tables);
spider_close_sys_table(thd, table_tables,
&open_tables_backup, TRUE);
@@ -6149,11 +6141,9 @@ int spider_open_all_tables(
}
/* create another conn */
- if (
- (!(conn = spider_get_conn(
- &tmp_share, 0, tmp_share.conn_keys[0], trx, spider, TRUE, FALSE,
- SPIDER_CONN_KIND_MYSQL, &error_num)))
- ) {
+ if ((!(conn= spider_get_conn(&tmp_share, 0, tmp_share.conn_keys[0], trx,
+ spider, TRUE, FALSE, &error_num))))
+ {
spider_free_tmp_dbton_handler(spider);
spider_free(trx, share, MYF(0));
delete spider;
@@ -6520,6 +6510,25 @@ int spider_panic(
DBUG_RETURN(0);
}
+static void spider_update_optimizer_costs(OPTIMIZER_COSTS *costs)
+{
+  /* Assume a 1 gigabit network (125 MB/s) */
+ costs->disk_read_cost= IO_SIZE/(1000000000/8)*1000.00000;
+ costs->index_block_copy_cost= 0; // Not used
+
+ /*
+ The following costs are copied from ha_innodb.cc
+ The assumption is that the default storage engine used with Spider is
+ InnoDB.
+ */
+ costs->row_next_find_cost= 0.00007013;
+ costs->row_lookup_cost= 0.00076597;
+ costs->key_next_find_cost= 0.00009900;
+ costs->key_lookup_cost= 0.00079112;
+ costs->row_copy_cost= 0.00006087;
+}
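+
+/*
+  Illustrative arithmetic for the formula above: with IO_SIZE = 4096 the
+  disk_read_cost becomes 4096 / (1000000000/8) * 1000
+  = 4096 / 125000000 * 1000 ~= 0.033, i.e. roughly the number of
+  milliseconds needed to transfer one IO_SIZE block over a 1 gigabit
+  (125 MB/s) link.
+*/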
+
+
int spider_db_init(
void *p
) {
@@ -6563,6 +6572,7 @@ int spider_db_init(
spider_hton->show_status = spider_show_status;
spider_hton->create_group_by = spider_create_group_by_handler;
spider_hton->table_options= spider_table_option_list;
+ spider_hton->update_optimizer_costs= spider_update_optimizer_costs;
if (my_gethwaddr((uchar *) addr))
{
@@ -9180,9 +9190,8 @@ void *spider_table_bg_sts_action(
if (!conns[spider->search_link_idx])
{
spider_get_conn(share, spider->search_link_idx,
- share->conn_keys[spider->search_link_idx],
- trx, spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL,
- &error_num);
+ share->conn_keys[spider->search_link_idx], trx,
+ spider, FALSE, FALSE, &error_num);
if (conns[spider->search_link_idx])
{
conns[spider->search_link_idx]->error_mode = 0;
@@ -9326,9 +9335,8 @@ void *spider_table_bg_crd_action(
if (!conns[spider->search_link_idx])
{
spider_get_conn(share, spider->search_link_idx,
- share->conn_keys[spider->search_link_idx],
- trx, spider, FALSE, FALSE, SPIDER_CONN_KIND_MYSQL,
- &error_num);
+ share->conn_keys[spider->search_link_idx], trx,
+ spider, FALSE, FALSE, &error_num);
if (conns[spider->search_link_idx])
{
conns[spider->search_link_idx]->error_mode = 0;
diff --git a/storage/spider/spd_trx.cc b/storage/spider/spd_trx.cc
index f266b27c871..8e1257bad21 100644
--- a/storage/spider/spd_trx.cc
+++ b/storage/spider/spd_trx.cc
@@ -2748,13 +2748,11 @@ int spider_internal_xa_commit_by_xid(
goto error;
}
- if (
- !(conn = spider_get_conn(
- &tmp_share, 0, tmp_share.conn_keys[0], trx, NULL, FALSE, FALSE,
- SPIDER_CONN_KIND_MYSQL, &error_num)) &&
- (force_commit == 0 ||
- (force_commit == 1 && error_num != ER_XAER_NOTA))
- ) {
+ if (!(conn= spider_get_conn(&tmp_share, 0, tmp_share.conn_keys[0], trx,
+ NULL, FALSE, FALSE, &error_num)) &&
+ (force_commit == 0 ||
+ (force_commit == 1 && error_num != ER_XAER_NOTA)))
+ {
spider_sys_index_end(table_xa_member);
spider_free_tmp_share_alloc(&tmp_share);
free_root(&mem_root, MYF(0));
@@ -2977,13 +2975,11 @@ int spider_internal_xa_rollback_by_xid(
goto error;
}
- if (
- !(conn = spider_get_conn(
- &tmp_share, 0, tmp_share.conn_keys[0], trx, NULL, FALSE, FALSE,
- SPIDER_CONN_KIND_MYSQL, &error_num)) &&
- (force_commit == 0 ||
- (force_commit == 1 && error_num != ER_XAER_NOTA))
- ) {
+ if (!(conn= spider_get_conn(&tmp_share, 0, tmp_share.conn_keys[0], trx,
+ NULL, FALSE, FALSE, &error_num)) &&
+ (force_commit == 0 ||
+ (force_commit == 1 && error_num != ER_XAER_NOTA)))
+ {
spider_sys_index_end(table_xa_member);
spider_free_tmp_share_alloc(&tmp_share);
free_root(&mem_root, MYF(0));
@@ -3473,11 +3469,8 @@ int spider_end_trx(
DBUG_RETURN(error_num);
}
-int spider_check_trx_and_get_conn(
- THD *thd,
- ha_spider *spider,
- bool use_conn_kind
-) {
+int spider_check_trx_and_get_conn(THD *thd, ha_spider *spider)
+{
int error_num, roop_count, search_link_idx;
SPIDER_TRX *trx;
SPIDER_SHARE *share = spider->share;
@@ -3577,22 +3570,16 @@ int spider_check_trx_and_get_conn(
spider->conn_link_idx, roop_count, share->link_count,
SPIDER_LINK_STATUS_RECOVERY)
) {
- uint tgt_conn_kind = (use_conn_kind ? spider->conn_kind[roop_count] :
- SPIDER_CONN_KIND_MYSQL);
if (roop_count == spider->search_link_idx)
search_link_idx_is_checked = TRUE;
- if (
- tgt_conn_kind == SPIDER_CONN_KIND_MYSQL &&
- !spider->conns[roop_count]
- ) {
+ if (!spider->conns[roop_count])
+ {
*spider->conn_keys[roop_count] = first_byte;
if (
!(conn =
spider_get_conn(share, roop_count,
spider->conn_keys[roop_count], trx,
spider, FALSE, TRUE,
- use_conn_kind ? spider->conn_kind[roop_count] :
- SPIDER_CONN_KIND_MYSQL,
&error_num))
) {
if (
@@ -3672,8 +3659,6 @@ int spider_check_trx_and_get_conn(
spider_get_conn(share, roop_count,
spider->conn_keys[roop_count], trx,
spider, FALSE, TRUE,
- use_conn_kind ? spider->conn_kind[roop_count] :
- SPIDER_CONN_KIND_MYSQL,
&error_num))
) {
if (
diff --git a/storage/spider/spd_trx.h b/storage/spider/spd_trx.h
index 2055a49717e..93b03fcec21 100644
--- a/storage/spider/spd_trx.h
+++ b/storage/spider/spd_trx.h
@@ -227,11 +227,7 @@ int spider_end_trx(
SPIDER_CONN *conn
);
-int spider_check_trx_and_get_conn(
- THD *thd,
- ha_spider *spider,
- bool use_conn_kind
-);
+int spider_check_trx_and_get_conn(THD *thd, ha_spider *spider);
THD *spider_create_tmp_thd();
diff --git a/tests/check_costs.pl b/tests/check_costs.pl
new file mode 100755
index 00000000000..a89f3d07160
--- /dev/null
+++ b/tests/check_costs.pl
@@ -0,0 +1,1023 @@
+#!/usr/bin/env perl
+
+# Copyright (C) 2022 MariaDB Foundation
+# Use is subject to license terms
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
+
+# This is a test that runs queries to measure whether the MariaDB cost
+# calculations are reasonable.
+#
+# The following tests are run:
+# - Full table scan of a table
+# - Range scan of the table
+# - Index scan of the table
+#
+# The output can be used to fine-tune the optimizer cost variables.
+#
+# The table in question is similar to the 'lineitem' table used by DBT3;
+# it has 16 fields and can be regarded as an 'average' kind of table.
+# The number of fields and the record length play only a small role when
+# comparing index scans and table scans.
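+#
+# Example invocation (connection values below are only placeholders):
+#   ./check_costs.pl --user=test --db=test --rows=100000 --engine=innodb --analyze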
+
+##################### Standard benchmark inits ##############################
+
+use DBI;
+use Getopt::Long;
+use Benchmark ':hireswallclock';
+
+package main;
+
+$opt_rows=1000000;
+$opt_test_runs= 2; # Run each test 2 times and take the average
+$opt_verbose="";
+$opt_host="";
+$opt_db="test";
+$opt_user="test";
+$opt_password="";
+$opt_socket=undef;
+$opt_skip_drop= undef;
+$opt_skip_create= undef;
+$opt_init_query= undef;
+$opt_analyze= undef;
+$opt_where_check= undef;
+$opt_engine=undef;
+$opt_comment=undef;
+$opt_table_suffix=undef;
+$opt_table_name= undef;
+$opt_gprof= undef;
+$opt_all_tests=undef;
+$opt_ratios= undef;
+$opt_mysql= undef;
+$has_force_index=1;
+
+@arguments= @ARGV;
+
+GetOptions("host=s","user=s","password=s", "rows=i","test-runs=i","socket=s",
+ "db=s", "table-name=s", "skip-drop","skip-create",
+ "init-query=s","engine=s","comment=s",
+ "gprof", "one-test=s",
+ "mysql", "all-tests", "ratios", "where-check",
+ "analyze", "verbose") ||
+ die "Aborted";
+
+$Mysql::db_errstr=undef; # Ignore warnings from these
+
+my ($base_table, $table, $dbh, $where_cost, $real_where_cost, $perf_ratio);
+
+if (!$opt_mysql)
+{
+ @engines= ("aria","innodb","myisam","heap");
+}
+else
+{
+ @engines= ("innodb","myisam","heap");
+}
+
+# Special handling for some engines
+
+$no_force= 0;
+
+if (defined($opt_engine))
+{
+  if (lc($opt_engine) eq "archive")
+ {
+ $has_force_index= 0; # Skip tests with force index
+ }
+}
+
+
+if (defined($opt_gprof) || defined($opt_one_test))
+{
+ die "one_test must be defined when --gprof is used"
+ if (!defined($opt_one_test));
+ die "engine must be defined when --gprof or --one-test is used"
+ if (!defined($opt_engine));
+ die "function '$opt_one_test' does not exist\n"
+ if (!defined(&{$opt_one_test}));
+}
+
+# We add engine_name to the table name later
+
+$opt_table_name="check_costs" if (!defined($opt_table_name));
+$base_table="$opt_db.$opt_table_name";
+
+####
+#### Start timeing and start test
+####
+
+$|= 1; # Autoflush
+if ($opt_verbose)
+{
+ $opt_analyze= 1;
+}
+
+####
+#### Create the table
+####
+
+my %attrib;
+
+$attrib{'PrintError'}=0;
+
+if (defined($opt_socket))
+{
+ $attrib{'mariadb_socket'}=$opt_socket;
+}
+
+$dbh = DBI->connect("DBI:MariaDB:$opt_db:$opt_host",
+ $opt_user, $opt_password,\%attrib) || die $DBI::errstr;
+
+print_mariadb_version();
+print "Server options: $opt_comment\n" if (defined($opt_comment));
+print "Running tests with $opt_rows rows\n";
+
+print "Program arguments:\n";
+for ($i= 0 ; $i <= $#arguments; $i++)
+{
+ my $arg=$arguments[$i];
+ if ($arg =~ / /)
+ {
+ if ($arg =~ /([^ =]*)=(.*)/)
+ {
+ print "$1=\"$2\" ";
+ }
+ else
+ {
+ print "\"$arg\"" . " ";
+ }
+ }
+ else
+ {
+ print $arguments[$i] . " ";
+ }
+}
+print "\n\n";
+
+@test_names=
+ ("table scan no where", "table scan simple where",
+ "table scan where no match", "table scan complex where", "table scan",
+ "index scan", "index scan 4 parts", "range scan", "eq_ref_index_join",
+ "eq_ref_cluster_join", "eq_ref_join", "eq_ref_btree");
+$where_tests=3; # Number of where tests to be compared with test[0]
+
+if ($opt_mysql)
+{
+ create_seq_table();
+}
+
+
+if ($opt_engine || defined($opt_one_test))
+{
+ test_engine(0, $opt_engine);
+}
+else
+{
+ my $i;
+ undef($opt_skip_create);
+ for ($i= 0 ; $i <= $#engines; $i++)
+ {
+ test_engine($i, $engines[$i]);
+
+ if ($i > 0 && $opt_ratios)
+ {
+ print "\n";
+ my $j;
+
+ print "Ratios $engines[$i] / $engines[0]\n";
+ for ($j= $where_tests+1 ; $j <= $#test_names ; $j++)
+ {
+ if ($res[$i][$j])
+ {
+ my $cmp_cost= $res[0][$j]->{'cost'} - $res[0][$j]->{'where_cost'};
+ my $cmp_time= $res[0][$j]->{'time'};
+ my $cur_cost= $res[$i][$j]->{'cost'} - $res[$i][$j]->{'where_cost'};
+ my $cur_time= $res[$i][$j]->{'time'};
+
+ printf "%14.14s cost: %6.4f time: %6.4f cost_multiplier: %6.4f\n",
+ $test_names[$j],
+ $cur_cost / $cmp_cost,
+ $cur_time / $cmp_time,
+ ($cmp_cost * ($cur_time / $cmp_time))/$cur_cost;
+ }
+      }
+ }
+# if ($i + 1 <= $#engines)
+ {
+ print "-------------------------\n\n";
+ }
+ }
+ print_totals();
+}
+
+$dbh->do("drop table if exists $table") if (!defined($opt_skip_drop));
+$dbh->disconnect; $dbh=0; # Close handler
+exit(0);
+
+
+sub test_engine()
+{
+ my ($i, $engine)= @_;
+ my ($cur_rows);
+
+ setup_engine($engine);
+ setup($opt_init_query);
+ $table= $base_table . "_$engine";
+ if (!defined($opt_skip_create) || !check_if_table_exist($table))
+ {
+ my $index_type="";
+
+    # We should use a btree index with heap to get range scans
+ $index_type= "using btree" if (lc($engine) eq "heap");
+
+ print "Creating table $table of type $engine\n";
+ $dbh->do("drop table if exists $table");
+ $dbh->do("create table $table (
+ `l_orderkey` int(11) NOT NULL,
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL,
+ `l_extra` int(11) NOT NULL,
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`),
+ UNIQUE (`l_linenumber`),
+ UNIQUE (`l_extra`) $index_type,
+ KEY `l_suppkey` $index_type (l_suppkey, l_partkey),
+ KEY `long_suppkey` $index_type
+ (l_partkey, l_suppkey, l_linenumber, l_extra) )
+ ENGINE= $engine")
+ or die "Got error on CREATE TABLE: $DBI::errstr";
+ }
+ $cur_rows= get_row_count($table);
+ if ($cur_rows == 0 || !defined($opt_skip_create))
+ {
+ $dbh->do("insert into $table select
+ seq, seq/10, seq, seq, seq, seq, seq, mod(seq,10)*10,
+ 0, 'a','b',
+ date_add('2000-01-01', interval seq/500 day),
+ date_add('2000-01-10', interval seq/500 day),
+ date_add('2000-01-20', interval seq/500 day),
+ left(md5(seq),25),
+ if(seq & 1,'mail','ship'),
+ repeat('a',mod(seq,40))
+ from seq_1_to_$opt_rows")
+ or die "Got error on INSERT: $DBI::errstr";
+
+ $sth= $dbh->do("analyze table $table")
+ or die "Got error on 'analyze table: " . $dbh->errstr . "\n";
+ }
+ else
+ {
+ $opt_rows= $cur_rows;
+ die "Table $table is empty. Please run without --skip-create"
+ if ($opt_rows == 0);
+ print "Reusing old table $table, which has $opt_rows rows\n";
+ }
+
+ if (!$opt_mysql)
+ {
+ $where_cost=get_variable("optimizer_where_cost");
+ if (defined($where_cost))
+ {
+ # Calculate cost of where once. Must be done after table is created
+ $real_where_cost= get_where_cost();
+ $perf_ratio= $real_where_cost/$where_cost;
+ printf "Performance ratio compared to base computer: %6.4f\n",
+ $perf_ratio;
+ }
+ print "\n";
+ }
+ else
+ {
+ $where_cost=0.1; # mysql 'm_row_evaluate_cost'
+ }
+
+
+ if (defined($opt_one_test))
+ {
+ if (defined($opt_gprof))
+ {
+ # Argument is the name of the test function
+ test_with_gprof($opt_one_test, 10);
+ return;
+ }
+ $opt_one_test->();
+ return;
+ }
+
+ if ($opt_where_check)
+ {
+ $res[$i][0]= table_scan_without_where(0);
+ $res[$i][1]= table_scan_with_where(1);
+ $res[$i][2]= table_scan_with_where_no_match(2);
+ $res[$i][3]= table_scan_with_complex_where(3);
+ }
+ $res[$i][4]= table_scan_without_where_analyze(4);
+ $res[$i][5]= index_scan(5);
+ $res[$i][6]= index_scan_4_parts(6) if ($opt_all_tests);
+ $res[$i][7]= range_scan(7);
+ $res[$i][8]= eq_ref_index_join(8);
+ $res[$i][9]= eq_ref_clustered_join(9);
+ $res[$i][10]= eq_ref_join(10);
+ $res[$i][11]= eq_ref_join_btree(11);
+
+ if ($opt_where_check)
+ {
+ printf "Variable optimizer_where_cost: cur: %6.4f real: %6.4f prop: %6.4f\n",
+ $where_cost, $real_where_cost, $perf_ratio;
+ print "Ratio of WHERE costs compared to scan without a WHERE\n";
+ for ($j= 1 ; $j <= $where_tests ; $j++)
+ {
+ print_where_costs($i,$j,0);
+ }
+ print "\n";
+ }
+
+ print "Cost/time ratio for different scans types\n";
+ for ($j= $where_tests+1 ; $j <= $#test_names ; $j++)
+ {
+ if ($res[$i][$j])
+ {
+ print_costs($test_names[$j], $res[$i][$j]);
+ }
+ }
+}
+
+
+sub print_costs($;$)
+{
+ my ($name, $cur_res)= @_;
+
+ # Cost without where clause
+ my $cur_cost= $cur_res->{'cost'} - $cur_res->{'where_cost'};
+ my $cur_time= $cur_res->{'time'};
+
+ printf "%-20.20s cost: %9.4f time: %9.4f cost/time: %8.4f\n",
+ $name,
+ $cur_cost, $cur_time, $cur_cost/$cur_time;
+}
+
+sub print_where_costs()
+{
+ my ($index, $cmp, $base)= @_;
+
+ my $cmp_time= $res[$index][$cmp]->{'time'};
+ my $base_time= $res[$index][$base]->{'time'};
+
+ printf "%-30.30s time: %6.4f\n", $test_names[$cmp], $cmp_time / $base_time;
+}
+
+
+# Used to setup things like optimizer_switch or optimizer_cache_hit_ratio
+
+sub setup()
+{
+ my ($query)= @_;
+  my ($sth);
+
+ $sth= $dbh->do("flush tables") ||
+ die "Got error on 'flush tables': " . $dbh->errstr . "\n";
+ if (defined($query))
+ {
+ $sth= $dbh->do("$query") ||
+ die "Got error on '$query': " . $dbh->errstr . "\n";
+ }
+
+  # Set variables that may interfere with timings
+ $query= "set \@\@optimizer_switch='index_condition_pushdown=off'";
+ $sth= $dbh->do($query) ||
+ die "Got error on '$query': " . $dbh->errstr . "\n";
+}
+
+
+sub setup_engine()
+{
+ my ($engine)= @_;
+ my ($sth,$query);
+
+ if (!$opt_mysql)
+ {
+ # Set variables that may interfere with timings
+ $query= "set global $engine.optimizer_disk_read_ratio=0";
+ $sth= $dbh->do($query) ||
+ die "Got error on '$query': " . $dbh->errstr . "\n";
+ }
+}
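+
+# Note: setting <engine>.optimizer_disk_read_ratio to 0 should make the
+# optimizer assume that all blocks are already cached, so the reported costs
+# do not include assumed disk reads and can be compared directly with the
+# wall-clock timings measured by this script.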
+
+sub create_seq_table
+{
+ my $name= "seq_1_to_$opt_rows";
+ my $i;
+ print "Creating $name\n";
+ $dbh->do("drop table if exists $name") ||
+ die "Error on drop: " . $dbh->errstr ."\n";
+ $dbh->do("create table $name (seq int(11) not null) engine=heap")
+ || die "Error on create: " . $dbh->errstr ."\n";
+ for ($i= 1 ; $i < $opt_rows ; $i+=10)
+ {
+ $dbh->do("insert into $name values
+ ($i),($i+1),($i+2),($i+3),($i+4),($i+5),($i+6),($i+7),($i+8),($i+9)") || die "Error on insert";
+ }
+}
+
+
+
+##############################################################################
+# Query functions
+##############################################################################
+
+# Calculate the cost of the WHERE clause
+
+sub table_scan_without_where()
+{
+ my ($query_id)= @_;
+ return run_query($test_names[$query_id],
+ "table_scan", "ALL", $opt_rows,
+"select sum(l_quantity) from $table");
+}
+
+sub table_scan_with_where()
+{
+ my ($query_id)= @_;
+ return run_query($test_names[$query_id],
+ "table_scan", "ALL", $opt_rows,
+"select sum(l_quantity) from $table where l_commitDate >= '2000-01-01' and l_tax >= 0.0");
+}
+
+sub table_scan_with_where_no_match()
+{
+ my ($query_id)= @_;
+ return run_query($test_names[$query_id],
+ "table_scan", "ALL", $opt_rows,
+"select sum(l_quantity) from $table where l_commitDate >= '2000-01-01' and l_tax > 0.0 /* NO MATCH */");
+}
+
+
+sub table_scan_with_complex_where()
+{
+ my ($query_id)= @_;
+ return run_query($test_names[$query_id],
+ "table_scan", "ALL", $opt_rows,
+"select sum(l_quantity) from $table where l_commitDate >= '2000-01-01' and l_quantity*l_extendedprice-l_discount+l_tax > 0.0");
+}
+
+# Calculate the time spent on table accesses (done with the ANALYZE statement)
+
+# Table scan
+
+sub table_scan_without_where_analyze()
+{
+ my ($query_id)= @_;
+ return run_query_with_analyze($test_names[$query_id],
+ "table_scan", "ALL", $opt_rows,
+"select sum(l_quantity) from $table");
+}
+
+# Index scan with 2 key parts
+
+sub index_scan()
+{
+  my ($query_id)= @_;
+  return 0 if (!$has_force_index);
+ return run_query_with_analyze($test_names[$query_id],
+ "index_scan", "index", $opt_rows,
+"select count(*) from $table force index (l_suppkey) where l_suppkey >= 0 and l_partkey >=0");
+}
+
+# Index scan with 4 key parts
+# This is to check how the number of key parts affects the timings
+
+sub index_scan_4_parts()
+{
+ my ($query_id)= @_;
+ return 0 if (!$has_force_index);
+ return run_query_with_analyze($test_names[$query_id],
+ "index_scan_4_parts", "index", $opt_rows,
+"select count(*) from $table force index (long_suppkey) where l_linenumber >= 0 and l_extra >0");
+}
+
+sub range_scan()
+{
+ my ($query_id)= @_;
+ return 0 if (!$has_force_index);
+ return run_query_with_analyze($test_names[$query_id],
+ "range_scan", "range", $opt_rows,
+"select sum(l_orderkey) from $table force index(l_suppkey) where l_suppkey >= 0 and l_partkey >=0 and l_discount>=0.0");
+}
+
+sub eq_ref_index_join()
+{
+ my ($query_id)= @_;
+ return run_query_with_analyze($test_names[$query_id],
+ "eq_ref_index_join", "eq_ref", 1,
+"select straight_join count(*) from seq_1_to_$opt_rows,$table where seq=l_linenumber");
+}
+
+sub eq_ref_clustered_join()
+{
+ my ($query_id)= @_;
+ return run_query_with_analyze($test_names[$query_id],
+ "eq_ref_cluster_join", "eq_ref", 1,
+"select straight_join count(*) from seq_1_to_$opt_rows,$table where seq=l_orderkey");
+}
+
+sub eq_ref_join()
+{
+ my ($query_id)= @_;
+ return run_query_with_analyze($test_names[$query_id],
+ "eq_ref_join", "eq_ref", 1,
+"select straight_join count(*) from seq_1_to_$opt_rows,$table where seq=l_linenumber and l_partkey >= 0");
+}
+
+sub eq_ref_join_btree()
+{
+ my ($query_id)= @_;
+ return run_query_with_analyze($test_names[$query_id],
+ "eq_ref_btree", "eq_ref", 1,
+"select straight_join count(*) from seq_1_to_$opt_rows,$table where seq=l_extra and l_partkey >= 0");
+}
+
+
+# Calculate the cost of a basic where clause
+# This can be used to find out the speed of the current computer compared
+# to the reference computer on which the costs were calibrated.
+
+sub get_where_cost()
+{
+ my ($loop);
+ $loop=10000000;
+ # Return time in microseconds for one where (= optimizer_where_cost)
+ return query_time("select benchmark($loop, l_commitDate >= '2000-01-01' and l_tax >= 0.0) from $table limit 1")/$loop;
+}
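+
+# For example (illustrative numbers only): if the benchmark query above takes
+# 0.35 seconds in total, query_time() returns 350000 microseconds and
+# get_where_cost() returns 350000/10000000 = 0.035, i.e. about 0.035
+# microseconds per evaluated WHERE clause.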
+
+
+# Run a query to be able to calculate the costs of filter
+
+sub cost_of_filtering()
+{
+ my ($query, $cost1, $cost2);
+ do_query("set \@\@max_rowid_filter_size=10000000," .
+ "optimizer_switch='rowid_filter=on',".
+ "\@\@optimizer_scan_setup_cost=1000000");
+ do_query("set \@old_cost=\@\@aria.OPTIMIZER_ROW_LOOKUP_COST");
+ do_query("set global aria.OPTIMIZER_ROW_LOOKUP_COST=1");
+ do_query("flush tables");
+ $cost1= run_query_with_analyze("range", "range", "range", 500000,
+ "select count(l_discount) from check_costs_aria as t1 where t1.l_orderkey between 1 and 500000");
+ $cost2= run_query_with_analyze("range-all", "range-all", "range|filter", 500000,
+ "select count(l_discount) from check_costs_aria as t1 where t1.l_orderkey between 1 and 500000 and l_linenumber between 1 and 500000");
+ $cost3= run_query_with_analyze("range-none","range-none", "range|filter", 500000,
+ "select count(l_discount) from check_costs_aria as t1 where t1.l_orderkey between 1 and 500000 and l_linenumber between 500000 and 1000000");
+ do_query("set global aria.OPTIMIZER_ROW_LOOKUP_COST=\@old_cost");
+ do_query("flush tables");
+ print_costs("range", $cost1);
+ print_costs("filter-all", $cost2);
+ print_costs("filter-none", $cost3);
+}
+
+sub gprof_cost_of_filtering()
+{
+ $cost2= run_query_with_analyze("gprof","range-all", "range|filter", 500000,
+ "select count(l_discount) from check_costs_aria as t1 where t1.l_orderkey between 1 and 500000 and l_linenumber between 1 and 500000");
+}
+
+
+###############################################################################
+# Help functions for running the queries
+###############################################################################
+
+
+# Run query and return time for query in microseconds
+
+sub query_time()
+{
+ my ($query)= @_;
+ my ($start_time,$end_time,$time,$ms,$sth,$row);
+
+ $start_time= new Benchmark;
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $end_time=new Benchmark;
+ $row= $sth->fetchrow_arrayref();
+ $sth=0;
+
+ $time= timestr(timediff($end_time, $start_time),"nop");
+ $time =~ /([\d.]*)/;
+ return $1*1000000.0;
+}
+
+#
+# Run a query and compare the clock time
+#
+
+sub run_query()
+{
+ my ($full_name, $name, $type, $expected_rows, $query)= @_;
+ my ($start_time,$end_time,$sth,@row,%res,$i,$optimizer_rows);
+ my ($extra, $last_type, $adjust_cost, $ms);
+ $adjust_cost=1.0;
+
+ print "Timing full query: $full_name\n$query\n";
+
+ $sth= $dbh->prepare("explain $query") || die "Got error on 'explain $query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on 'explain $query': " . $dbh->errstr . "\n";
+
+ print "explain:\n";
+ while ($row= $sth->fetchrow_arrayref())
+ {
+ print $row->[0];
+ for ($i= 1 ; $i < @$row; $i++)
+ {
+ print " " . $row->[$i] if (defined($row->[$i]));
+ }
+ print "\n";
+
+ $extra= $row->[@$row-1];
+ $last_type= $row->[3];
+ $optimizer_rows= $row->[8];
+ }
+ if ($last_type ne $type &&
+ ($type ne "index" || !($extra =~ /Using index/)))
+ {
+ print "Warning: Wrong scan type: '$last_type', expected '$type'\n";
+ }
+
+ if ($expected_rows >= 0 &&
+ (abs($optimizer_rows - $expected_rows)/$expected_rows) > 0.1)
+ {
+ printf "Warning: Expected $expected_rows instead of $optimizer_rows from EXPLAIN. Adjusting costs\n";
+ $adjust_cost= $expected_rows / $optimizer_rows;
+ }
+
+ # Do one query to fill the cache
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $end_time=new Benchmark;
+ $row= $sth->fetchrow_arrayref();
+ $sth=0;
+
+ # Run query for real
+ $start_time= new Benchmark;
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $end_time=new Benchmark;
+ $row= $sth->fetchrow_arrayref();
+ $sth=0;
+
+ $time= timestr(timediff($end_time, $start_time),"nop");
+ $time =~ /([\d.]*)/;
+ $ms= $1*1000.0;
+
+ $query= "show status like 'last_query_cost'";
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+  $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ $sth=0;
+ $cost= $row->[1] * $adjust_cost;
+ printf "%10s time: %10.10s ms cost: %6.4f", $name, $ms, $cost;
+ if ($adjust_cost != 1.0)
+ {
+ printf " (was %6.4f)", $row->[1];
+ }
+ print "\n\n";
+
+ $res{'cost'}= $cost;
+ $res{'time'}= $ms;
+ return \%res;
+}
+
+#
+# Run a query and compare the table access time from analyze statement
+# The cost works for queries with one or two tables!
+#
+
+sub run_query_with_analyze()
+{
+ my ($full_name,$name, $type, $expected_rows, $query)= @_;
+ my ($start_time,$end_time,$sth,@row,%res,$i,$j);
+ my ($optimizer_rows, $optimizer_rows_first);
+ my ($adjust_cost, $ms, $second_ms, $analyze, $local_where_cost);
+ my ($extra, $last_type, $tot_ms, $found_two_tables);
+
+ $found_two_tables= 0;
+ $adjust_cost=1.0;
+ if (!$opt_mysql)
+ {
+ $local_where_cost= $where_cost/1000 * $opt_rows;
+ }
+ else
+ {
+ $local_where_cost= $where_cost * $opt_rows;
+ }
+ $optimizer_rows_first= undef;
+
+ print "Timing table access for query: $full_name\n$query\n";
+
+ $sth= $dbh->prepare("explain $query") || die "Got error on 'explain $query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on 'explain $query': " . $dbh->errstr . "\n";
+
+ print "explain:\n";
+ if (!$opt_mysql)
+ {
+ $type_pos= 3;
+ $row_pos= 8;
+ }
+ else
+ {
+ $type_pos= 4;
+ $row_pos= 9;
+ }
+
+ $j= 0;
+ while ($row= $sth->fetchrow_arrayref())
+ {
+ $j++;
+ print $row->[0];
+ for ($i= 1 ; $i < @$row; $i++)
+ {
+ print " " . $row->[$i] if (defined($row->[$i]));
+ # print " X" if (!defined($row->[$i]));
+ }
+ print "\n";
+
+ $extra= $row->[@$row-1];
+ $last_type= $row->[$type_pos];
+ if (!defined($optimizer_rows_first))
+ {
+ $optimizer_rows_first= $row->[$row_pos];
+ }
+ $optimizer_rows= $row->[$row_pos];
+ }
+ $found_two_tables= 1 if ($j > 1);
+
+ if ($last_type ne $type &&
+ ($type ne "index" || !($extra =~ /Using index/)))
+ {
+ print "Warning: Wrong scan type: '$last_type', expected '$type'\n";
+ }
+ if ($expected_rows >= 0 &&
+ (abs($optimizer_rows - $expected_rows)/$expected_rows) > 0.1)
+ {
+ printf "Warning: Expected $expected_rows instead of $optimizer_rows from EXPLAIN. Adjusting costs\n";
+ $adjust_cost= $expected_rows / $optimizer_rows;
+ }
+
+ # Do one query to fill the cache
+  if (!defined($opt_gprof))
+ {
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ $sth=0;
+ }
+
+ # Run the query through analyze statement
+ $tot_ms=0;
+ if (!$opt_mysql)
+ {
+ for ($i=0 ; $i < $opt_test_runs ; $i++)
+ {
+ my ($j);
+      $sth= $dbh->prepare("analyze format=json $query") || die "Got error on 'analyze $query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ $analyze= $row->[0];
+ $sth=0;
+
+ # Fetch the timings
+ $j=0;
+ while ($analyze =~ /r_table_time_ms": ([0-9.]*)/g)
+ {
+ $tot_ms= $tot_ms+ $1;
+ $j++;
+ }
+ if ($j > 2)
+ {
+ die "Found too many tables, program needs to be extended!"
+ }
+ # Add cost of filtering
+ while ($analyze =~ /r_filling_time_ms": ([0-9.]*)/g)
+ {
+ $tot_ms= $tot_ms+ $1;
+ }
+ }
+ }
+ else
+ {
+ my $local_table= substr($table,index($table,".")+1);
+ for ($i=0 ; $i < $opt_test_runs ; $i++)
+ {
+ my ($j);
+      $sth= $dbh->prepare("explain analyze $query") || die "Got error on 'explain analyze $query': " . $dbh->errstr . "\n";
+ $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ $analyze= $row->[0];
+ $sth=0;
+ }
+ # Fetch the timings
+ $j=0;
+
+ if ($analyze =~ / $local_table .*actual time=([0-9.]*) .*loops=([0-9]*)/g)
+ {
+ my $times= $1;
+ my $loops= $2;
+ $times =~ /\.\.([0-9.]*)/;
+ $times= $1;
+ $times="0.005" if ($times == 0);
+ #print "time: $times \$1: $1 loops: $loops\n";
+ $tot_ms= $tot_ms+ $times*$loops;
+ $j++;
+ }
+ if ($j > 1)
+ {
+ die "Found too many tables, program needs to be extended!"
+ }
+ }
+
+
+ if ($found_two_tables)
+ {
+    # Add the cost of the where clause for the two tables. The last table
+    # is assumed to have $expected_rows rows while the first (driving) table
+    # may have fewer rows. Take that into account when calculating the
+    # total where cost.
+ $local_where_cost= ($local_where_cost +
+ $local_where_cost *
+ ($optimizer_rows_first/$opt_rows));
+ }
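+  # For example (illustrative numbers only): if $local_where_cost is 35 for
+  # the last table and the driving table is estimated to return 200000 of the
+  # 1000000 rows, the combined value becomes 35 + 35 * (200000/1000000) = 42.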
+ $ms= $tot_ms/$opt_test_runs;
+
+ if ($opt_analyze)
+ {
+ print "\nanalyze:\n" . $analyze . "\n\n";
+ }
+
+  if (!defined($opt_gprof))
+ {
+ # Get last query cost
+ $query= "show status like 'last_query_cost'";
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+    $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ $sth=0;
+ $cost= $row->[1] * $adjust_cost;
+
+ printf "%10s time: %10.10s ms cost-where: %6.4f cost: %6.4f",
+ $name, $ms, $cost - $local_where_cost, $cost;
+ if ($adjust_cost != 1.0)
+ {
+ printf " (cost was %6.4f)", $row->[1];
+ }
+ }
+ else
+ {
+ printf "%10s time: %10.10s ms", $name, $ms;
+ $cost= 0; $local_where_cost= 0;
+ }
+ print "\n\n";
+
+ $res{'cost'}= $cost;
+ $res{'where_cost'}= $local_where_cost;
+ $res{'time'}= $ms;
+ return \%res;
+}
+
+
+sub do_query()
+{
+ my ($query)= @_;
+ $dbh->do($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+}
+
+
+sub print_totals()
+{
+ my ($i, $j);
+ print "Totals per test\n";
+ for ($j= $where_tests+1 ; $j <= $#test_names; $j++)
+ {
+ print "$test_names[$j]:\n";
+    for ($i= 0 ; $i <= $#engines ; $i++)
+ {
+ if ($res[$i][$j])
+ {
+ my $cost= $res[$i][$j]->{'cost'} - $res[$i][$j]->{'where_cost'};
+ my $ms= $res[$i][$j]->{'time'};
+ printf "%-8s %10.4f ms cost: %10.4f cost/time: %8.4f\n",
+ $engines[$i], $ms, $cost, $cost/$ms;
+ }
+ }
+ }
+}
+
+
+# This function can be used to test things with gprof
+
+sub test_with_gprof()
+{
+ my ($function_ref, $loops)= @_;
+ my ($sum, $i, $cost);
+
+  printf "Running test $function_ref $loops times\n";
+ $sum= 0; $loops=10;
+ for ($i=0 ; $i < $loops ; $i++)
+ {
+ $cost= $function_ref->();
+ $sum+= $cost->{'time'};
+ }
+ print "Average: " . ($sum/$loops) . "\n";
+  print "Shutting down server\n";
+ $dbh->do("shutdown") || die "Got error ..";
+}
+
+##############################################################################
+# Get various simple data from MariaDB
+##############################################################################
+
+sub print_mariadb_version()
+{
+ my ($query, $sth, $row);
+ $query= "select VERSION()";
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+  $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ print "Server: $row->[0]";
+
+ $query= "show variables like 'VERSION_SOURCE_REVISION'";
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+  $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ print " Commit: $row->[1]\n";
+}
+
+
+sub get_row_count()
+{
+ my ($table)= @_;
+ my ($query, $sth, $row);
+ $query= "select count(*) from $table";
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+ if (!$sth->execute)
+ {
+ if (!($dbh->errstr =~ /doesn.*exist/))
+ {
+ die "Got error on '$query': " . $dbh->errstr . "\n";
+ }
+ return 0;
+ }
+ $row= $sth->fetchrow_arrayref();
+ return $row->[0];
+}
+
+
+sub get_variable()
+{
+ my ($name)= @_;
+ my ($query, $sth, $row);
+ $query= "select @@" . $name;
+ if (!($sth= $dbh->prepare($query)))
+ {
+ die "Got error on '$query': " . $dbh->errstr . "\n";
+ }
+  $sth->execute || die "Got error on '$query': " . $dbh->errstr . "\n";
+ $row= $sth->fetchrow_arrayref();
+ return $row->[0];
+}
+
+
+sub check_if_table_exist()
+{
+ my ($name)= @_;
+ my ($query,$sth);
+ $query= "select 1 from $name limit 1";
+ $sth= $dbh->prepare($query) || die "Got error on '$query': " . $dbh->errstr . "\n";
+ if (!$sth->execute || !defined($sth->fetchrow_arrayref()))
+ {
+    return 0; # Table does not exist
+ }
+ return 1;
+}
diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c
index bfa7f296300..6cfe960fb04 100644
--- a/tests/mysql_client_fw.c
+++ b/tests/mysql_client_fw.c
@@ -13,6 +13,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
+#define VER "2.1"
#include <my_global.h>
#include <my_sys.h>
#include <mysql.h>
@@ -24,6 +25,7 @@
#include <mysql_version.h>
#include <sql_common.h>
#include <mysql/client_plugin.h>
+#include <welcome_copyright_notice.h>
/*
If non_blocking_api_enabled is true, we will re-define all the blocking
@@ -37,7 +39,6 @@ static my_bool non_blocking_api_enabled= 0;
#include "nonblock-wrappers.h"
#endif
-#define VER "2.1"
#define MAX_TEST_QUERY_LENGTH 300 /* MAX QUERY BUFFER LENGTH */
#define MAX_KEY MAX_INDEXES
#define MAX_SERVER_ARGS 64
@@ -1256,8 +1257,7 @@ static void usage(void)
{
/* show the usage string when the user asks for this */
putc('\n', stdout);
- printf("%s Ver %s Distrib %s, for %s (%s)\n",
- my_progname, VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
+ print_version();
puts("By Monty, Venu, Kent and others\n");
printf("\
Copyright (C) 2002-2004 MySQL AB\n\
diff --git a/tests/prev_record.cc b/tests/prev_record.cc
new file mode 100644
index 00000000000..66fe8e6a464
--- /dev/null
+++ b/tests/prev_record.cc
@@ -0,0 +1,466 @@
+/* Copyright (c) 2023 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
+
+/*
+ This program simulates the MariaDB query process execution using
+ the SCAN, EQ_REF, REF and join_cache (CACHE) row lookup methods.
+
+ The purpose is to verify that 'prev_record_reads()' function correctly
+ estimates the number of lookups we have to do for EQ_REF access
+ assuming we have 'one-row-cache' before the lookup.
+
+ The logic for the prev_record_reads() function in this file should
+ match the logic in sql_select.cc::prev_record_reads() in MariaDB 11.0
+ and above.
+
+  The program first generates a randomized plan using the above
+  methods, then executes the full 'query' processing and finally
+  checks that the number of EQ_REF engine lookups matches the
+  estimated number of lookups.
+
+  If the estimated number of lookups is not exact, the plan and the
+  lookup numbers are printed.  A printed plan is not in itself a
+  failure; it is only a failure if the number of engine calls is far
+  greater than the estimated number of lookups.
+
+  Note that the estimated number of lookups is exact only if the
+  number of CACHE refills == 1 and the EQ_REF table depends on only
+  one earlier table.
+*/
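+
+/*
+  Example usage (the binary name depends on how this file is built; shown
+  here as ./prev_record):
+    ./prev_record 10 12345
+  runs the simulation with 10 tables and the random generator seeded with
+  12345.  With no arguments, DEFAULT_TABLES tables and a time() based seed
+  are used.
+*/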
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <time.h>
+
+#define TABLES 21
+#define DEFAULT_TABLES 10
+#define CACHED_ROWS 10000
+#define unlikely(A) A
+
+enum JOIN_TYPE { SCAN, EQ_REF, REF, CACHE };
+const char *type[]= { "SCAN", "EQ_REF", "REF", "CACHE"};
+
+typedef unsigned long long DEPEND;
+typedef unsigned int uint;
+typedef unsigned long long ulonglong;
+
+struct TABLE
+{
+ ulonglong data;
+ JOIN_TYPE type;
+ DEPEND map;
+ DEPEND ref_depend_map;
+ uint records_in_table;
+ uint matching_records;
+  ulonglong last_key;
+ ulonglong lookups;
+ ulonglong *cache; // join cache
+ ulong cached_records;
+ ulong flushed_caches;
+};
+
+struct POSITION
+{
+ TABLE *table;
+ JOIN_TYPE type;
+ double records;
+ double record_count;
+ double records_out;
+ double prev_record_read;
+ double same_keys;
+ ulong refills;
+};
+
+uint opt_tables= DEFAULT_TABLES;
+bool verbose=0;
+uint rand_init;
+struct TABLE table[TABLES];
+struct POSITION positions[TABLES];
+
+void do_select(uint table_index);
+
+
+static void
+prev_record_reads(POSITION *position, uint idx, DEPEND found_ref,
+ double record_count)
+{
+ double found= 1.0;
+ POSITION *pos_end= position - 1;
+ POSITION *cur_pos= position + idx;
+
+ /* Safety against const tables */
+ if (!found_ref)
+ goto end;
+
+ for (POSITION *pos= cur_pos-1; pos != pos_end; pos--)
+ {
+ if (found_ref & pos->table->map)
+ {
+ found_ref&= ~pos->table->map;
+
+      /* Found a dependent table */
+ if (pos->type == EQ_REF)
+ {
+ if (!found_ref)
+ found*= pos->same_keys;
+ }
+ else if (pos->type == CACHE)
+ {
+ if (!found_ref)
+ found*= pos->record_count / pos->refills;
+ }
+ break;
+ }
+ if (pos->type != CACHE)
+ {
+ /*
+        We are not depending on the current table.
+        There are 'records_out' rows with identical values
+        for the tables we depend on.
+        We are ignoring join_cache, as in that case the
+        preceding tables' row combination can change for
+        each call.
+ */
+ found*= pos->records_out;
+ }
+ else
+ found/= pos->refills;
+ }
+
+end:
+ cur_pos->record_count= record_count;
+ cur_pos->same_keys= found;
+ assert(record_count >= found);
+
+ if (unlikely(found <= 1.0))
+ cur_pos->prev_record_read= record_count;
+ else if (unlikely(found > record_count))
+ cur_pos->prev_record_read=1;
+ else
+ cur_pos->prev_record_read= record_count / found;
+ return;
+}
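+
+/*
+  Illustrative example: assume the EQ_REF table at position 2 depends only on
+  the first table (records_out = 3) and the REF table between them produces
+  4 matching rows per key (records_out = 4).  Then record_count = 3 * 4 = 12,
+  found = 4 and prev_record_read = 12 / 4 = 3, i.e. only 3 of the 12 row
+  combinations are expected to need a real engine lookup; the rest hit the
+  one-row cache.
+*/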
+
+
+void cleanup()
+{
+ for (uint i= 0; i < opt_tables ; i++)
+ {
+ free(table[i].cache);
+ table[i].cache= 0;
+ }
+}
+
+
+void initialize_tables()
+{
+ int eq_ref_tables;
+
+restart:
+ eq_ref_tables= 0;
+ for (uint i= 0; i < opt_tables ; i++)
+ {
+ if (i == 0)
+ table[i].type= SCAN;
+ else
+ table[i].type= (JOIN_TYPE) (rand() % 4);
+ table[i].records_in_table= rand() % 5+3;
+ table[i].matching_records= 2 + rand() % 3;
+ table[i].map= (DEPEND) 1 << i;
+ table[i].ref_depend_map= 0;
+
+/* The following is for testing */
+#ifdef FORCE_COMB
+ if (i == 5 || i == 6)
+ {
+ table[i].type= REF;
+ table[i].matching_records= 5;
+ }
+#endif
+ if (table[i].type != SCAN)
+ {
+ /* This just to make do_select a bit easier */
+ table[i].ref_depend_map= ((DEPEND) 1) << (rand() % i);
+ if (rand() & 1)
+ {
+ uint second_depend= rand() % i;
+        if (!(table[i].ref_depend_map & (((DEPEND) 1) << second_depend)))
+ table[i].ref_depend_map|= ((DEPEND) 1) << second_depend;
+ }
+ }
+
+ if (table[i].type == EQ_REF)
+ {
+ table[i].matching_records= 1;
+ eq_ref_tables++;
+ }
+ else if (table[i].type != REF)
+ table[i].matching_records= table[i].records_in_table;
+
+ table[i].last_key= 0;
+ table[i].lookups= 0;
+ table[i].cached_records= 0;
+ table[i].flushed_caches= 0;
+ table[i].cache= 0;
+ if (table[i].type == CACHE)
+ table[i].cache= (ulonglong*) malloc(CACHED_ROWS *
+ sizeof(table[i].data) * i);
+ }
+
+ /* We must have at least one EQ_REF table */
+ if (!eq_ref_tables)
+ {
+ cleanup();
+ goto restart;
+ }
+}
+
+
+void optimize_tables()
+{
+ double record_count= 1.0, records;
+
+ for (uint i= 0; i < opt_tables ; i++)
+ {
+ TABLE *tab= table+i;
+ positions[i].refills= 0;
+
+ switch (tab->type) {
+ case SCAN:
+ records= tab->records_in_table;
+ break;
+ case EQ_REF:
+ records= 1.0;
+ prev_record_reads(positions, i, tab->ref_depend_map, record_count);
+ break;
+ case REF:
+ records= tab->matching_records;
+ break;
+ case CACHE:
+ records= tab->records_in_table;
+ positions[i].refills= (record_count + CACHED_ROWS-1)/ CACHED_ROWS;
+ break;
+ default:
+ assert(0);
+ }
+ positions[i].table= table + i;
+ positions[i].type= table[i].type;
+ positions[i].records= records;
+ positions[i].record_count= record_count;
+ positions[i].records_out= records;
+
+ record_count*= records;
+ }
+}
+
+
+
+void process_join_cache(TABLE *tab, uint table_index)
+{
+ if (!tab->cached_records)
+ return;
+
+#ifdef PRINT_CACHE
+ putc('>', stdout);
+ for (uint k= 0 ; k < table_index ; k++)
+ {
+ printf("%8lld ", tab->cache[k]);
+ }
+ putc('\n',stdout);
+ putc('<', stdout);
+ for (uint k= 0 ; k < table_index ; k++)
+ {
+ printf("%8lld ", tab->cache[k+(tab->cached_records-1)*table_index]);
+ }
+ putc('\n',stdout);
+#endif
+
+ for (uint k= 0 ; k < tab->records_in_table; k++)
+ {
+ table[table_index].data= k+1;
+ ulonglong *cache= tab->cache;
+ for (uint i= 0 ; i < tab->cached_records ; i++)
+ {
+ for (uint j= 0 ; j < table_index ; j++)
+ table[j].data= *cache++;
+ do_select(table_index+1);
+ }
+ }
+ tab->flushed_caches++;
+ tab->cached_records= 0;
+}
+
+/*
+ Calculate a key depending on multiple tables
+*/
+
+ulonglong calc_ref_key(DEPEND depend_map)
+{
+ ulonglong value= 1;
+ TABLE *t= table;
+
+ do
+ {
+ if (t->map & depend_map)
+ {
+ depend_map&= ~t->map;
+ value*= t->data;
+ }
+ t++;
+ } while (depend_map);
+ return value;
+}
+
+
+void do_select(uint table_index)
+{
+ if (table_index == opt_tables)
+ return;
+
+ TABLE *tab= table + table_index;
+ switch (tab->type) {
+ case SCAN:
+ for (uint i= 1 ; i <= tab->records_in_table ; i++)
+ {
+ tab->data= i;
+ do_select(table_index+1);
+ }
+ break;
+ case REF:
+ {
+ ulonglong ref_key= calc_ref_key(tab->ref_depend_map);
+ for (uint i=1 ; i <= tab->matching_records ; i++)
+ {
+ tab->data= ref_key * tab->matching_records + i;
+ do_select(table_index+1);
+ }
+ break;
+ }
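+  /*
+    The EQ_REF case below simulates the one-row cache: a new engine lookup
+    (tab->lookups++) is done only when the computed ref_key differs from the
+    key used in the previous call.  For example, the key sequence 5, 5, 7, 5
+    counts 3 lookups, not 4.
+  */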
+ case EQ_REF:
+ {
+ ulonglong ref_key= calc_ref_key(tab->ref_depend_map);
+ if (ref_key != tab->last_key)
+ {
+ tab->lookups++;
+#ifdef PRINT_EQ_KEY
+ if (table_index == 9)
+ printf("ref_key: %lld\n", ref_key);
+#endif
+ tab->last_key= ref_key;
+ tab->data= ref_key * tab->matching_records;
+ }
+ else
+ {
+ assert(tab->lookups != 0);
+ }
+ do_select(table_index+1);
+ break;
+ }
+ case CACHE:
+ {
+ ulonglong *cache= tab->cache + tab->cached_records * table_index;
+    for (uint i= 0 ; i < table_index ; i++)
+ *cache++ = table[i].data;
+ if (++tab->cached_records == CACHED_ROWS)
+ process_join_cache(tab, table_index);
+ break;
+ }
+ default:
+ break;
+ }
+ return;
+}
+
+
+void do_select_end(uint table_index)
+{
+ if (table_index == opt_tables)
+ return;
+
+ TABLE *tab= table + table_index;
+ switch (tab->type) {
+ case CACHE:
+ process_join_cache(tab, table_index);
+ break;
+ default:
+ break;
+ }
+ do_select_end(table_index+1);
+}
+
+
+void execute()
+{
+ do_select(0);
+ do_select_end(0);
+}
+
+int check_prev_records()
+{
+ int errors= 0;
+ for (uint i= 0; i < opt_tables ; i++)
+ {
+ TABLE *tab= table + i;
+ if (tab->type == EQ_REF)
+ {
+ if (positions[i].prev_record_read != (double) tab->lookups)
+ {
+ fprintf(stdout, "table: %d lookups: %lld prev_record_read: %g\n",
+ i, tab->lookups, positions[i].prev_record_read);
+ errors++;
+ }
+ }
+ }
+ if (errors || verbose)
+ {
+ fprintf(stdout, "tables: %u\n", opt_tables);
+ fprintf(stdout, "rand_init: %u\n", rand_init);
+ fprintf(stdout, "cache_size: %u\n", (uint) CACHED_ROWS);
+ for (uint i= 0; i < opt_tables ; i++)
+ {
+ TABLE *tab= table + i;
+ fprintf(stdout, "table: %2d (%3lx) type: %-6s comb: %3lg out: %2lg lookups: %lld prev: %lg depend: %llx\n",
+ i, (uint) 1 << i, type[tab->type], positions[i].record_count,
+ positions[i].records_out, tab->lookups,
+ positions[i].prev_record_read, tab->ref_depend_map);
+ }
+ }
+ return errors;
+}
+
+
+int main(int argc, char **argv)
+{
+ if (argc > 1)
+ {
+ opt_tables=atoi(argv[1]);
+ if (opt_tables <= 3)
+ opt_tables= 3;
+ if (opt_tables > TABLES)
+ opt_tables= TABLES;
+ }
+ if (argc > 2)
+ rand_init= atoi(argv[2]);
+ else
+ rand_init= (uint) time(0);
+ srand(rand_init);
+
+  initialize_tables();
+ optimize_tables();
+ execute();
+ cleanup();
+ exit(check_prev_records() > 0);
+}